Dataset columns:
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2 to 616)
content_id: string (length 40)
detected_licenses: list (length 0 to 69)
license_type: string (2 classes)
repo_name: string (length 5 to 118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4 to 63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k to 686M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2 to 10.3M)
extension: string (246 classes)
content: string (length 2 to 10.3M)
authors: list (length 1)
author_id: string (length 0 to 212)
91969ea210ce82e783961566b0f1c27d26bb9132
|
7803d25a8e1424870364b6326c629b57d62258ee
|
/LeetCode/l41.py
|
8928cbabc6446ad9b854a9c534410fdca91a2540
|
[] |
no_license
|
zhaoyu20150930/leetcode-test
|
7318e55f18d5779d3c3b8ec54163df23d57a4c05
|
b6f74dd6b6902272ee04e5029231838f7161d6f5
|
refs/heads/master
| 2021-02-08T06:00:45.237543
| 2020-04-21T07:31:30
| 2020-04-21T07:31:30
| 244,116,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
from typing import List
"""
Given an unsorted integer array, find the smallest missing positive integer.
Example 1:
Input: [1,2,0]
Output: 3
Example 2:
Input: [3,4,-1,1]
Output: 2
Example 3:
Input: [7,8,9,11,12]
Output: 1
Note:
Your algorithm should run in O(n) time and use only constant extra space.
Source: LeetCode (LeetCode-CN)
Link: https://leetcode-cn.com/problems/first-missing-positive
Copyright belongs to LeetCode. For commercial reuse, contact them for official authorization; for non-commercial reuse, cite the source.
"""
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        # Bitmap approach: mark every value in 1..len(nums) as a bit inside
        # 32-bit chunks, then scan the chunks for the first unset bit.
        m = len(nums)
        c = [0] * ((m // 32) + 1)
        for i in nums:
            if i < 1 or i > m:
                continue
            ci, di = divmod(i, 32)
            c[ci] |= 1 << di
        for ci, cv in enumerate(c):
            if cv != 0xFFFFFFFF:  # this chunk still has at least one unset bit
                for i in range(32):
                    if (1 << i) & cv == 0 and i + ci * 32 != 0:
                        return i + ci * 32
        return len(c) * 32
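
# A minimal alternative sketch (editor's addition, not part of the original file):
# the bitmap above still needs O(n/32) extra ints, whereas the canonical O(n)-time,
# O(1)-extra-space solution places each value v into slot v - 1 with cyclic swaps.
def first_missing_positive_inplace(nums: List[int]) -> int:
    n = len(nums)
    for i in range(n):
        # keep swapping nums[i] toward its home slot nums[nums[i] - 1]
        while 1 <= nums[i] <= n and nums[nums[i] - 1] != nums[i]:
            j = nums[i] - 1
            nums[i], nums[j] = nums[j], nums[i]
    for i in range(n):
        if nums[i] != i + 1:
            return i + 1
    return n + 1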
if __name__ == '__main__':
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127]
print(Solution().firstMissingPositive(l))
|
[
"1097794678@qq.com"
] |
1097794678@qq.com
|
bb3c3f2d1cd1fd17e9ca3b8ade21708132828daf
|
653d557c0a70cefbeb23254774d9104e6ee3c66c
|
/lowdown/manage/verticals/serializers.py
|
b1424664583c70ba774cc2bc668fa7641646444e
|
[] |
no_license
|
brudil/lowdown
|
0b3dda283e927d19b8c55f6d54ca3a8486ff059d
|
247b9a8e21e622c90da339da9615fae6e5dfb8be
|
refs/heads/master
| 2023-01-14T12:19:38.664892
| 2019-12-09T22:48:03
| 2019-12-09T22:48:03
| 97,485,829
| 1
| 0
| null | 2022-12-26T20:58:14
| 2017-07-17T14:28:52
|
Python
|
UTF-8
|
Python
| false
| false
| 426
|
py
|
from rest_framework import serializers
class VerticalSerializer(serializers.Serializer):
name = serializers.CharField()
identifier = serializers.CharField()
audience = serializers.CharField()
content_forms = serializers.ListField(child=serializers.IntegerField())
content_tones = serializers.ListField(child=serializers.IntegerField())
channels = serializers.ListField(child=serializers.CharField())
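
# Hypothetical usage sketch (editor's addition; "vertical" stands for whatever
# object exposes the attributes declared above):
#   data = VerticalSerializer(vertical).data
#   data = VerticalSerializer(verticals, many=True).data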
|
[
"james@brudil.com"
] |
james@brudil.com
|
23c84d81c52c47c7bab2fcbd6a0b508e1c7a9f99
|
247ab484c9f03369ad5d9dae9977ddc9eb7de394
|
/scripts/Clustering_Surface.py
|
0334d00dc3fb023efe2ea064a49a70b3f1e0263e
|
[] |
no_license
|
Erismena/Study_Surface
|
a880149e91e089973da98e555f35b908b3460bea
|
ff8102ec9de92b395906213d88aee852fa713098
|
refs/heads/master
| 2020-03-07T11:47:02.574297
| 2019-04-10T15:20:59
| 2019-04-10T15:20:59
| 127,463,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 10:09:51 2018
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import pims
import scipy.ndimage
import pandas as pd
from scipy.ndimage.morphology import binary_fill_holes
import skimage.measure
import skimage.filters
import skimage.morphology
from matplotlib import cm
import trackpy as tp
import skimage
import cv2
from skimage.filters import threshold_local
import fluids2d.backlight as backlight
import fluids2d.geometry
from datetime import datetime
thresh=500
folder = r'E:\Stephane\171114\\'
cine_name = r'balloon_breakup_pumps_fps10000_backlight_D800minch_d20mm'
c = pims.open(folder+cine_name+'.cine')
im = c[12000][400:800,20:600]
dx = 0.000112656993755
g = fluids2d.geometry.GeometryScaler(dx=dx,im_shape=np.shape(im),origin_pos=(0,0),origin_units='m')
bg = backlight.find_bg_image(c)
plt.figure()
plt.imshow(bg)
stophere  # intentional NameError: deliberately halts the script here after inspecting the background image
print('--- random walker (red) ---')
start_rw = datetime.now()
rw = backlight.random_walker_detection(im,thresh,g,viz=False)
end_rw = datetime.now()
dur_rw = end_rw - start_rw
print(dur_rw)
df = backlight.labeled_props(rw,g)
vf = backlight.estimate_void_fraction(df)
print(vf)
print('--- watershed (green) ---')
start_ws = datetime.now()
ws = backlight.watershed_detection(im,thresh,g,RadiusDiskMean=1,viz=False) #[550:750,400:600]
end_ws = datetime.now()
dur_ws = end_ws - start_ws
print(dur_ws)
df_ws = backlight.labeled_props(ws,g)
vf = backlight.estimate_void_fraction(df_ws)
print(vf)
print('--- standard (blue) ---')
start_standard = datetime.now()
filled = backlight.get_filled(im,thresh)
df_standard = backlight.filled2regionpropsdf(filled,g=g)
end_standard = datetime.now()
dur_standard = end_standard - start_standard
print(dur_standard)
vf = backlight.estimate_void_fraction(df_standard)
print(vf)
#
#df = backlight.labeled_props(rw,g)
#vf = backlight.estimate_void_fraction(df)
#print(vf)
ax = backlight.show_and_annotate(im,g,df,ax=None,vmin=0,vmax=600)
backlight.add_ellipses_to_ax(df_ws,ax,color=[0,1,0,0.5])
backlight.add_ellipses_to_ax(df_standard,ax,color=[0,0,1,0.5])
#df_all = backlight.run_bubble_detection(c,thresh,g,frames=frames,method='watershed')
|
[
"37940197+Erismena@users.noreply.github.com"
] |
37940197+Erismena@users.noreply.github.com
|
e64353d042c187d8f5b5a688cba81762d490e5e5
|
3be37c8e602c6de2233fe1bc26df7921f90fb400
|
/monitor/views.py
|
5ac8a7c0e83a52e05320e823ffb481a8c67ccb7a
|
[] |
no_license
|
sans123wx/django_learnings
|
18a32198b173b30bc2b1a956f3da11688e324d15
|
1e441dcdacab284a9f9ec0f162a0148191f3252e
|
refs/heads/master
| 2020-04-05T17:42:12.823572
| 2018-11-28T13:37:13
| 2018-11-28T13:37:13
| 157,072,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
from django.shortcuts import render
from .models import *
# Create your views here.
def monitor(request):
monitors = Monitor.objects.all()
context = {}
context['monitors'] = monitors
return render(request, 'monitor.html', context)
|
[
"514232900@qq.com"
] |
514232900@qq.com
|
55de3daf1824d85c2ed03562f3b546e1b3e19b98
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/problem/good_way.py
|
813179408f61de67e352c578568740bf585e72fd
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
#! /usr/bin/env python
def same_hand(str_arg):
point(str_arg)
print('time')
def point(str_arg):
print(str_arg)
if __name__ == '__main__':
same_hand('time')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
0d5004cdcb65e3215685a01249b2a0cf45215392
|
8015f1c62a2cb4efd21aa8938336913bf8117868
|
/bamap/ba2717.pngMap.py
|
cd46eaffd0ee86a446678bd0c71eb1581b9b5e6d
|
[] |
no_license
|
GamerNoTitle/Beepers-and-OLED
|
675b5e3c179df0f0e27b42bf594c43860d03b9af
|
afe1340e5394ae96bda5f9022a8a66824368091e
|
refs/heads/master
| 2020-04-20T00:09:47.122471
| 2019-04-29T04:59:35
| 2019-04-29T04:59:35
| 168,515,579
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,468
|
py
|
ba2717.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111110100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000101111111101100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000001111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111100000000000000000000000000000000000000010111000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111110000000000000000000000000000000000000011111100000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111100000000000000000000000000000000000001111111110000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111100000000000000000000000000000000000001011111111110000000000000000000000000000000000',
'00000000000000000000000000000000001111111111010000000000000000000000000000000000001111111111111111000000000000000000000000000000',
'00000000000000000000000000000000001111111111100000000000000000000000000000000000001111111111111111010000000000000000000000000000',
'00000000000000000000000000000000001011111111110000000000000000000000000000000001111111111111111111100000000000000000000000000000',
'00000000000000000000000000000000001011111111100000000000000000000000000000000011111111111111111111100000000000000000000000000000',
'00000000000000000000000000000000000111111111100000000000000000000000000000000111111111111111110001000000000000000000000000000000',
'00000000000000000000000000000000001111111111100000000000000000000000000000001111111111111111110000000000000000000000000000000000',
'00000000000000000000000000000000011111111111000000000000000000000000000000000000110000111111110000000000000000000000000000000000',
'00000000000000000000000000000000001111111111000000000000000000000000000000000000000000111111110000000000000000000000000000000000',
'00000000000000000000000000000000011111111111000000000000000000000000000000000000000000111111100000000000000000000000000000000000',
'00000000000000000000000000000000111111111111000000000000000000000000000000000000001011111111110000000000000000000000000000000000',
'00000000000000000000000000000000111111111111100000000000000000000000000100000000000000011111111111100000000100000000000000000000',
'00000000000000000000000000000000111111111111000000000000000000000000011111110001100000111111110011110000101111110000000000000000',
'00000000000000000000000000000010111111111111000000000000000000000000001111111010000000011111111000001110111111110000000000000000',
'00000000000000000000000000000000111111111111110000000000000000000000000111010000000000011111111100001111111000000000000000000000',
'00000000000000000000000000000010111111111111110000000000000000000000000000000000000000001111111000000010000000000000000000000000',
'00000000000000000000000000000000111111111111110000000000000000000000000000000000000000000111111100000000000000000000000000000000',
'00000000000000000000000000000011111111111111111000000000000000000000000000000000000000000011111100000000000000000000000000000000',
'00000000000000000000000000000011111111111111111000000000000000000000000000000000000000000011111100000000000000000000000000000000',
'00000000000000000000000000000011111111111111111100000000000000000000000000000000000000000011111100000000000000000000000000000000',
'00000000000000000000000000000011111111111111111100000000000000000000000000000000000000000011111100000000000000000000000000000000',
'00000000000000000000000000000011111111111111111100000000000000000000000000000000000000010111111111000000000000000000000000000000',
'00000000000000000000000000000011111111111111111100000000000000000000000000000000000000111111111110000000000000000000000000000000',
'00000000000000000000000000000011111111111111111110000000000000000000000000000000000000111111111111000000000000000000000000000000',
'00000000000000000000000000001011111111111111111111000000000000000000000000000000000000111111111111000000000000000000000000000000',
'00000000000000000000000000000011111111111111111110000000000000000000000000000000000000111111111110100000000000000000000000000000',
'00000000000000000000000000000011111111111111111111000000000000000000000000000000000000111111111111000000000000000000000000000000',
'00000000000000000000000000001111111111111111111111000000000000000000000000000000000001111111111111000000000000000000000000000000',
'00000000000000000000000000001111111111111111111111000000000000000000000000000000000011111111111111000000000000000000000000000000',
'00000000000000000000000000000111111111111111111111000000000000000000000000000000000011111111111111000000000000000000000000000000',
'00000000000000000000000000001111111111111111111111000000000000000000000000000000000011111111111111000000000000000000000000000000',
'00000000000000000000000000001111111111111111111111000000000000000000000000000000000011001111111100100000000000000000000000000000',
'00000000000000000000000000001111111111111111111111100000000000000000000000000000000001001111111101100000000000000000000000000000',
'00000000000000000000000000001111111111111111111111110000000000000000000000000000000000001111111100000000000000000000000000000000',
'00000000000000000000000000011111111111111111111111110000000000000000000000000000000000000111111100000000000000000000000000000000',
]
|
[
"bili33@87ouo.top"
] |
bili33@87ouo.top
|
9a3f47eadd5025612f381c514ac0784d4d231315
|
c8a93730c5e7cbe5ad29314392924dbe1d0ff3f5
|
/ponni13.py
|
64f0944916fafc5ab3dce95f5a11cfc9fab1110d
|
[] |
no_license
|
ponniravi/a23
|
9912666f8a744ce93e32456bd8008db98002e560
|
09e490167007895f1c0e1f6dd9487cd5457df1df
|
refs/heads/master
| 2020-07-04T00:46:44.576253
| 2019-08-14T07:02:50
| 2019-08-14T07:02:50
| 202,102,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
# Print the sum of the squares of the digits of the input number,
# e.g. input "123" -> 1 + 4 + 9 = 14.
digits = list(map(int, input()))
total = 0
for d in digits:
    total += d ** 2
print(total)
|
[
"noreply@github.com"
] |
ponniravi.noreply@github.com
|
4545abf183e6a439e1bfebb4c3ce53de27c8f66a
|
80480b6191c43a3122d0deba378942cb6052a740
|
/prototype/node_modules/pkcs11js/build/config.gypi
|
555780362dd30806203942852cc023112ec23d55
|
[
"MIT"
] |
permissive
|
RHIE-coder/Project.HLF
|
08f8040e4b415192b7eeeb495ff316d556baf101
|
b5178f9cdb6e4df8286b4b2b1910b44cf5372f17
|
refs/heads/master
| 2022-12-29T18:00:24.650489
| 2020-01-07T08:30:06
| 2020-01-07T08:30:06
| 222,851,809
| 0
| 0
| null | 2022-12-15T04:11:57
| 2019-11-20T04:37:29
|
Go
|
UTF-8
|
Python
| false
| false
| 5,235
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.57",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/bstudent/.node-gyp/8.11.1",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/5.6.0 node/v8.11.1 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1001",
"prefer_online": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/bstudent/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/home/bstudent/.nvm/versions/node/v8.11.1",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/bstudent/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/home/bstudent/.nvm/versions/node/v8.11.1/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"prefer_offline": "",
"color": "true",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"auth_type": "legacy",
"node_version": "8.11.1",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/home/bstudent/.nvm/versions/node/v8.11.1/etc/npmrc",
"init_module": "/home/bstudent/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/bstudent/.nvm/versions/node/v8.11.1/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1001",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
|
[
"quotia72@naver.com"
] |
quotia72@naver.com
|
01e528870768f18ecfa0f92dfad9680c8e5d2620
|
f42df9f4102cfe3fc2259df4a60707bd53520ac2
|
/WikId/src/WikId/wiki/admin.py
|
78c3c14a878cff0162bd20d444b81b36b4bb1ecc
|
[] |
no_license
|
martinSternelius/Wikid-django
|
ed0fb2f491f9e4b004d6c60a6af69e5518fc25f0
|
aeacae0a5a6c6740bad88bdb12d28be7eb5cc46e
|
refs/heads/master
| 2016-09-10T23:27:45.399538
| 2010-11-09T09:16:14
| 2010-11-09T09:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from WikId.wiki.models import Article, Article_section
from django.contrib import admin
class Article_section_inline(admin.StackedInline):
model = Article_section
extra = 1
class Article_admin(admin.ModelAdmin):
inlines = [Article_section_inline]
admin.site.register(Article, Article_admin)
|
[
"WebhallenAB@.(none)"
] |
WebhallenAB@.(none)
|
b46e2c6dabd3f18097262154b19428f887d9cc9f
|
e7890014b32fceb85ff1fc20cec163baa7c756a6
|
/tests/fit_wrapper/test_runner.py
|
62e80d4bd1ee48c3c27e382e1773eaa8cf79f4de
|
[
"MIT"
] |
permissive
|
elejke/pytorch-tools
|
45923c6d658f216f1393f4d51cce4fe390682c3f
|
785527312c0cf929abb3c13e3528a98c03a3d913
|
refs/heads/master
| 2021-01-16T03:20:50.033614
| 2020-02-06T14:21:12
| 2020-02-06T14:21:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,249
|
py
|
import os
import pytest
import torch
import torch.nn as nn
import apex
from pytorch_tools.metrics import Accuracy
from pytorch_tools.fit_wrapper import Runner
from pytorch_tools.losses import CrossEntropyLoss
import pytorch_tools.fit_wrapper.callbacks as pt_clb
HIDDEN_DIM = 16
NUM_CLASSES = 10
IMG_SHAPE = 16
LOADER_LEN = 20
BS = 2
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, HIDDEN_DIM, kernel_size=3)
self.bn1 = nn.BatchNorm2d(HIDDEN_DIM)
self.conv2 = nn.Conv2d(HIDDEN_DIM, HIDDEN_DIM, kernel_size=3)
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(HIDDEN_DIM, NUM_CLASSES)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.conv2(x)
x = self.pool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class Loader:
def __init__(self):
pass  # stateless: batches are generated on the fly in __next__
def __iter__(self):
return self
def __len__(self):
return LOADER_LEN
def __next__(self):
img = torch.randn(BS, 3, IMG_SHAPE, IMG_SHAPE)
target = torch.randint(NUM_CLASSES, (BS,))
return img.cuda(), target.cuda()
TestLoader = Loader()
TestModel = Model().cuda()
TestOptimizer = torch.optim.SGD(TestModel.parameters(), lr=1e-3)
TestCriterion = CrossEntropyLoss().cuda()
TestMetric = Accuracy()
TestModel, TestOptimizer = apex.amp.initialize(TestModel, TestOptimizer, verbosity=0)
def test_default():
runner = Runner(
model=TestModel, optimizer=TestOptimizer, criterion=TestCriterion, metrics=TestMetric, callbacks=None
)
runner.fit(TestLoader, epochs=2)
def test_val_loader():
runner = Runner(model=TestModel, optimizer=TestOptimizer, criterion=TestCriterion, metrics=TestMetric,)
runner.fit(TestLoader, epochs=2, steps_per_epoch=100, val_loader=TestLoader, val_steps=200)
# We only test that callbacks don't crash NOT that they do what they should do
TMP_PATH = "/tmp/pt_tools2/"
os.makedirs(TMP_PATH, exist_ok=True)
# TODO: (emil 05.02.20) use pytest parametrize instead
def test_Timer_callback():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.Timer(),
)
runner.fit(TestLoader, epochs=2)
def test_ReduceLROnPlateau_callback():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.ReduceLROnPlateau(),
)
runner.fit(TestLoader, epochs=2)
def test_CheckpointSaver_callback():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.CheckpointSaver(TMP_PATH, save_name="model.chpn"),
)
runner.fit(TestLoader, epochs=2)
def test_FileLogger_callback():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.FileLogger(TMP_PATH),
)
runner.fit(TestLoader, epochs=2)
def test_TensorBoard():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.TensorBoard(log_dir=TMP_PATH),
)
runner.fit(TestLoader, epochs=2)
def test_TensorBoardWithCM():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.TensorBoardWithCM(log_dir=TMP_PATH),
)
runner.fit(TestLoader, epochs=2)
def test_Cutmix():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.Cutmix(1.0, NUM_CLASSES),
)
runner.fit(TestLoader, epochs=2)
def test_Mixup():
runner = Runner(
model=TestModel,
optimizer=TestOptimizer,
criterion=TestCriterion,
metrics=TestMetric,
callbacks=pt_clb.Mixup(0.2, NUM_CLASSES),
)
runner.fit(TestLoader, epochs=2)
|
[
"bonlimezak@gmail.com"
] |
bonlimezak@gmail.com
|
d2cf93a7455e14332521a1b112fc072e5fe86847
|
bba82bce7a124f62f4b9a54fb449969e116d9830
|
/applications/tools/wav2lip.py
|
74f40f6082f4caa9f5b39a44f88567f8f7c14986
|
[
"Python-2.0",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
moiseshorta/PaddleGAN
|
88680876e9a563cd24647ca4d04a3ff9ef8c2429
|
1d8cd182219e83653570ce03d9be8b5d5a955187
|
refs/heads/develop
| 2023-04-28T16:11:10.494290
| 2021-05-19T02:59:33
| 2021-05-19T02:59:33
| 369,027,654
| 1
| 0
|
Apache-2.0
| 2021-05-19T23:39:24
| 2021-05-19T23:39:23
| null |
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
import argparse
import paddle
from ppgan.apps.wav2lip_predictor import Wav2LipPredictor
parser = argparse.ArgumentParser(
description=
'Inference code to lip-sync videos in the wild using Wav2Lip models')
parser.add_argument('--checkpoint_path',
type=str,
help='Name of saved checkpoint to load weights from',
default=None)
parser.add_argument(
'--audio',
type=str,
help='Filepath of video/audio file to use as raw audio source',
required=True)
parser.add_argument('--face',
type=str,
help='Filepath of video/image that contains faces to use',
required=True)
parser.add_argument('--outfile',
type=str,
help='Video path to save the result. See the default for an example.',
default='results/result_voice.mp4')
parser.add_argument(
'--static',
type=bool,  # caveat: argparse's bool() is truthy for any non-empty string, so "--static False" still yields True
help='If True, then use only first video frame for inference',
default=False)
parser.add_argument(
'--fps',
type=float,
help='Can be specified only if input is a static image (default: 25)',
default=25.,
required=False)
parser.add_argument(
'--pads',
nargs='+',
type=int,
default=[0, 10, 0, 0],
help=
'Padding (top, bottom, left, right). Please adjust to include chin at least'
)
parser.add_argument('--face_det_batch_size',
type=int,
help='Batch size for face detection',
default=16)
parser.add_argument('--wav2lip_batch_size',
type=int,
help='Batch size for Wav2Lip model(s)',
default=128)
parser.add_argument(
'--resize_factor',
default=1,
type=int,
help=
'Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p'
)
parser.add_argument(
'--crop',
nargs='+',
type=int,
default=[0, -1, 0, -1],
help=
'Crop video to a smaller region (top, bottom, left, right). Applied after resize_factor and rotate arg. '
'Useful if multiple face present. -1 implies the value will be auto-inferred based on height, width'
)
parser.add_argument(
'--box',
nargs='+',
type=int,
default=[-1, -1, -1, -1],
help=
'Specify a constant bounding box for the face. Use only as a last resort if the face is not detected.'
'Also, might work only if the face is not moving around much. Syntax: (top, bottom, left, right).'
)
parser.add_argument(
'--rotate',
default=False,
action='store_true',
help=
'Sometimes videos taken from a phone can be flipped 90deg. If true, will flip video right by 90deg.'
'Use if you get a flipped result, despite feeding a normal looking video')
parser.add_argument(
'--nosmooth',
default=False,
action='store_true',
help='Prevent smoothing face detections over a short temporal window')
parser.add_argument("--cpu", dest="cpu", action="store_true", help="cpu mode.")
parser.add_argument(
"--face_detector",
dest="face_detector",
type=str,
default='sfd',
help="face detector to be used, can choose s3fd or blazeface")
if __name__ == "__main__":
args = parser.parse_args()
if args.cpu:
paddle.set_device('cpu')
predictor = Wav2LipPredictor(args)
predictor.run()
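
# Example invocation (editor's sketch; file paths are placeholders):
#   python applications/tools/wav2lip.py --face input_video.mp4 --audio speech.wav \
#       --outfile results/result_voice.mp4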
|
[
"noreply@github.com"
] |
moiseshorta.noreply@github.com
|
9d5e174451a029b8858288c3bb342a6bafd87be3
|
a5fb604b0476edd61334f6b01085e24169b0269e
|
/predict.py
|
27e86537e68f16444f44674c9e408b08015808b3
|
[] |
no_license
|
eperry4750/TensorFlow_Image_Classifier
|
2ff984ab02032921ea6f7c4bcdefbb6ce5b62ece
|
22c6dee36eecf94db9ce8fa17abbf47bb05ad5ef
|
refs/heads/main
| 2023-05-25T12:43:09.439699
| 2021-06-12T14:54:26
| 2021-06-12T14:54:26
| 376,171,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,529
|
py
|
import numpy as np
import json
import tensorflow as tf
import tensorflow_hub as hub
from argparse import ArgumentParser
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
def process_image(image):
IMAGE_SIZE = 224
image_tensor = tf.convert_to_tensor(image)
resized_image = tf.image.resize(image_tensor, (IMAGE_SIZE, IMAGE_SIZE))/255.0
return resized_image.numpy()
def predict(image_path, model_path, top_k):
im = Image.open(image_path)
image = np.asarray(im)
processed_image = process_image(image)
finished_image = np.expand_dims(processed_image, axis=0)
predictions = model_path.predict(finished_image)
top_predictions = tf.math.top_k(predictions, k = top_k)
return top_predictions[0].numpy(), top_predictions[1].numpy()
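# Note: model_path.predict returns scores of shape (1, num_classes), so predict()
# yields two arrays of shape (1, top_k): the top probabilities and their class indices.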
def build_argparser():
"""
Parse command line arguments.
:return: command line arguments
"""
parser = ArgumentParser()
parser.add_argument("-m", "--model_path", required=True, type=str,
help="Path to a trained model.")
parser.add_argument("-i", "--image_path", required=True, type=str,
help="Path to an image file")
parser.add_argument("-k", "--top_k", required=False, type=int,
default=1,
help="Return top k probabiliites.")
parser.add_argument("-l", "--label_map", required=False, type=str, default=None,
help="A mapping of classes to real names from a json file")
return parser
def predict_main(args):
model_path = args.model_path
saved_model = tf.keras.models.load_model(model_path, custom_objects={'KerasLayer':hub.KerasLayer})
image_path = args.image_path
top_k = args.top_k
label_map = args.label_map
probs, classes = predict(image_path, saved_model, top_k)
if label_map is None:
for prob, clas in zip(probs[0], classes[0]):
print(prob, clas)
else:
with open(label_map, 'r') as f:
class_names = json.load(f)
for prob, clas in zip(probs[0], [class_names[str(clas + 1)] for clas in classes[0]]):
print(prob, clas)
def main():
"""
Load the network and parse the output.
:return: None
"""
# Grab command line args
args = build_argparser().parse_args()
predict_main(args)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
eperry4750.noreply@github.com
|
545d3c8e2913d6ceda77346141e4025ccb31fa21
|
304a1fa65079d89a9a5a67e0d29d42f7dc9a89b8
|
/GUI-BankSystem.py
|
d843740bb9f7b7f5aee4da2d296a0f55e4e9ae8a
|
[] |
no_license
|
hsa306/banking-system
|
1ff190e270bd48109a9e751340f839efb142311a
|
6e2897d6b64785721030a66065a29f731448615a
|
refs/heads/main
| 2023-01-10T00:48:13.397449
| 2020-11-16T13:12:01
| 2020-11-16T13:12:01
| 313,123,115
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,382
|
py
|
import tkinter as tk
from tkinter import messagebox
from time import gmtime, strftime
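
# Storage layout (as used throughout this script):
#   Accnt_Record.txt    - last issued account number
#   <accnt_no>.txt      - PIN, current balance, account number, holder name (one per line)
#   <accnt_no>-rec.txt  - transaction log: date, credit, debit, balance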
def is_number(s):
try:
float(s)
return 1
except ValueError:
return 0
def check_acc_nmb(num):
try:
fpin=open(num+".txt",'r')
except FileNotFoundError:
messagebox.showinfo("Error","Invalid Credentials!\nTry Again!")
return 0
fpin.close()
return
def home_return(master):
master.destroy()
Main_Menu()
def write(master,name,oc,pin):
if( (is_number(name)) or (is_number(oc)==0) or (is_number(pin)==0)or name==""):
messagebox.showinfo("Error","Invalid Credentials\nPlease try again.")
master.destroy()
return
f1=open("Accnt_Record.txt",'r')
accnt_no=int(f1.readline())
accnt_no+=1
f1.close()
f1=open("Accnt_Record.txt",'w')
f1.write(str(accnt_no))
f1.close()
fdet=open(str(accnt_no)+".txt","w")
fdet.write(pin+"\n")
fdet.write(oc+"\n")
fdet.write(str(accnt_no)+"\n")
fdet.write(name+"\n")
fdet.close()
frec=open(str(accnt_no)+"-rec.txt",'w')
frec.write("Date Credit Debit Balance\n")
frec.write(str(strftime("[%Y-%m-%d] [%H:%M:%S] ",gmtime()))+" "+oc+" "+oc+"\n")
frec.close()
messagebox.showinfo("Details","Your Account Number is:"+str(accnt_no))
master.destroy()
return
def crdt_write(master,amt,accnt,name):
if(is_number(amt)==0):
messagebox.showinfo("Error","Invalid Credentials\nPlease try again.")
master.destroy()
return
fdet=open(accnt+".txt",'r')
pin=fdet.readline()
camt=int(fdet.readline())
fdet.close()
amti=int(amt)
cb=amti+camt
fdet=open(accnt+".txt",'w')
fdet.write(pin)
fdet.write(str(cb)+"\n")
fdet.write(accnt+"\n")
fdet.write(name+"\n")
fdet.close()
frec=open(str(accnt)+"-rec.txt",'a+')
frec.write(str(strftime("[%Y-%m-%d] [%H:%M:%S] ",gmtime()))+" "+str(amti)+" "+str(cb)+"\n")
frec.close()
messagebox.showinfo("Operation Successfull!!","Amount Credited Successfully!!")
master.destroy()
return
def debit_write(master,amt,accnt,name):
if(is_number(amt)==0):
messagebox.showinfo("Error","Invalid Credentials\nPlease try again.")
master.destroy()
return
fdet=open(accnt+".txt",'r')
pin=fdet.readline()
camt=int(fdet.readline())
fdet.close()
if(int(amt)>camt):
messagebox.showinfo("Error!!","You dont have that amount left in your account\nPlease try again.")
else:
amti=int(amt)
cb=camt-amti
fdet=open(accnt+".txt",'w')
fdet.write(pin)
fdet.write(str(cb)+"\n")
fdet.write(accnt+"\n")
fdet.write(name+"\n")
fdet.close()
frec=open(str(accnt)+"-rec.txt",'a+')
frec.write(str(strftime("[%Y-%m-%d] [%H:%M:%S] ",gmtime()))+" "+" "+str(amti)+" "+str(cb)+"\n")
frec.close()
messagebox.showinfo("Operation Successfull!!","Amount Debited Successfully!!")
master.destroy()
return
def Cr_Amt(accnt,name):
creditwn=tk.Tk()
creditwn.geometry("600x300")
creditwn.title("Credit Amount")
creditwn.configure(bg="orange")
fr1=tk.Frame(creditwn,bg="blue")
l_title=tk.Message(creditwn,text="UNITED BANK",relief="raised",width=2000,padx=600,pady=0,fg="white",bg="black",justify="center",anchor="center")
l_title.config(font=("Courier","50","bold"))
l_title.pack(side="top")
l1=tk.Label(creditwn,relief="raised",text="Enter Amount to be credited: ")
e1=tk.Entry(creditwn,relief="raised")
l1.pack(side="top")
e1.pack(side="top")
b=tk.Button(creditwn,text="Credit",relief="raised",command=lambda:crdt_write(creditwn,e1.get(),accnt,name))
b.pack(side="top")
creditwn.bind("<Return>",lambda x:crdt_write(creditwn,e1.get(),accnt,name))
def De_Amt(accnt,name):
debitwn=tk.Tk()
debitwn.geometry("600x300")
debitwn.title("Debit Amount")
debitwn.configure(bg="orange")
fr1=tk.Frame(debitwn,bg="blue")
l_title=tk.Message(debitwn,text="UNITED BANK",relief="raised",width=2000,padx=600,pady=0,fg="white",bg="black",justify="center",anchor="center")
l_title.config(font=("Courier","50","bold"))
l_title.pack(side="top")
l1=tk.Label(debitwn,relief="raised",text="Enter Amount to be debited: ")
e1=tk.Entry(debitwn,relief="raised")
l1.pack(side="top")
e1.pack(side="top")
b=tk.Button(debitwn,text="Debit",relief="raised",command=lambda:debit_write(debitwn,e1.get(),accnt,name))
b.pack(side="top")
debitwn.bind("<Return>",lambda x:debit_write(debitwn,e1.get(),accnt,name))
def disp_bal(accnt):
fdet=open(accnt+".txt",'r')
fdet.readline()
bal=fdet.readline()
fdet.close()
messagebox.showinfo("Balance",bal)
def disp_tr_hist(accnt):
disp_wn=tk.Tk()
disp_wn.geometry("900x600")
disp_wn.title("Transaction History")
disp_wn.configure(bg="orange")
fr1=tk.Frame(disp_wn,bg="blue")
l_title=tk.Message(disp_wn,text="UNITED BANK",relief="raised",width=2000,padx=600,pady=0,fg="white",bg="black",justify="center",anchor="center")
l_title.config(font=("Courier","50","bold"))
l_title.pack(side="top")
fr1=tk.Frame(disp_wn)
fr1.pack(side="top")
l1=tk.Message(disp_wn,text="Your Transaction History:",padx=100,pady=20,width=1000,bg="blue",fg="orange",relief="raised")
l1.pack(side="top")
fr2=tk.Frame(disp_wn)
fr2.pack(side="top")
frec=open(accnt+"-rec.txt",'r')
for line in frec:
l=tk.Message(disp_wn,anchor="w",text=line,relief="raised",width=2000)
l.pack(side="top")
b=tk.Button(disp_wn,text="Quit",relief="raised",command=disp_wn.destroy)
b.pack(side="top")
frec.close()
def logged_in_menu(accnt,name):
rootwn=tk.Tk()
rootwn.geometry("1600x500")
rootwn.title("UNITED BANK-"+name)
rootwn.configure(background='orange')
fr1=tk.Frame(rootwn)
fr1.pack(side="top")
l_title=tk.Message(rootwn,text="SIMPLE BANKING\n SYSTEM",relief="raised",width=2000,padx=600,pady=0,fg="white",bg="black",justify="center",anchor="center")
l_title.config(font=("Courier","50","bold"))
l_title.pack(side="top")
label=tk.Label(text="Logged in as: "+name,relief="raised",bg="black",fg="white",anchor="center",justify="center")
label.pack(side="top")
img2=tk.PhotoImage(file="credit.gif")
myimg2=img2.subsample(2,2)
img3=tk.PhotoImage(file="debit.gif")
myimg3=img3.subsample(2,2)
img4=tk.PhotoImage(file="balance1.gif")
myimg4=img4.subsample(2,2)
img5=tk.PhotoImage(file="transaction.gif")
myimg5=img5.subsample(2,2)
b2=tk.Button(image=myimg2,command=lambda: Cr_Amt(accnt,name))
b2.image=myimg2
b3=tk.Button(image=myimg3,command=lambda: De_Amt(accnt,name))
b3.image=myimg3
b4=tk.Button(image=myimg4,command=lambda: disp_bal(accnt))
b4.image=myimg4
b5=tk.Button(image=myimg5,command=lambda: disp_tr_hist(accnt))
b5.image=myimg5
img6=tk.PhotoImage(file="logout.gif")
myimg6=img6.subsample(2,2)
b6=tk.Button(image=myimg6,relief="raised",command=lambda: logout(rootwn))
b6.image=myimg6
b2.place(x=100,y=150)
b3.place(x=100,y=220)
b4.place(x=900,y=150)
b5.place(x=900,y=220)
b6.place(x=500,y=400)
def logout(master):
messagebox.showinfo("Logged Out","You Have Been Successfully Logged Out!!")
master.destroy()
Main_Menu()
def check_log_in(master,name,acc_num,pin):
if(check_acc_nmb(acc_num)==0):
master.destroy()
Main_Menu()
return
if( (is_number(name)) or (is_number(pin)==0) ):
messagebox.showinfo("Error","Invalid Credentials\nPlease try again.")
master.destroy()
Main_Menu()
else:
master.destroy()
logged_in_menu(acc_num,name)
def log_in(master):
master.destroy()
loginwn=tk.Tk()
loginwn.geometry("600x300")
loginwn.title("Log in")
loginwn.configure(bg="orange")
fr1=tk.Frame(loginwn,bg="blue")
l_title=tk.Message(loginwn,text="UNITED BANK",relief="raised",width=2000,padx=600,pady=0,fg="white",bg="black",justify="center",anchor="center")
l_title.config(font=("Courier","50","bold"))
l_title.pack(side="top")
l1=tk.Label(loginwn,text="Enter Name:",relief="raised")
l1.pack(side="top")
e1=tk.Entry(loginwn)
e1.pack(side="top")
l2=tk.Label(loginwn,text="Enter account number:",relief="raised")
l2.pack(side="top")
e2=tk.Entry(loginwn)
e2.pack(side="top")
l3=tk.Label(loginwn,text="Enter your PIN:",relief="raised")
l3.pack(side="top")
e3=tk.Entry(loginwn,show="*")
e3.pack(side="top")
b=tk.Button(loginwn,text="Submit",command=lambda: check_log_in(loginwn,e1.get().strip(),e2.get().strip(),e3.get().strip()))
b.pack(side="top")
b1=tk.Button(text="HOME",relief="raised",bg="black",fg="white",command=lambda: home_return(loginwn))
b1.pack(side="top")
loginwn.bind("<Return>",lambda x:check_log_in(loginwn,e1.get().strip(),e2.get().strip(),e3.get().strip()))
def Create():
crwn=tk.Tk()
crwn.geometry("600x300")
crwn.title("Create Account")
crwn.configure(bg="orange")
fr1=tk.Frame(crwn,bg="blue")
l_title=tk.Message(crwn,text="UNITED BANK",relief="raised",width=2000,padx=600,pady=0,fg="white",bg="black",justify="center",anchor="center")
l_title.config(font=("Courier","50","bold"))
l_title.pack(side="top")
l1=tk.Label(crwn,text="Enter Name:",relief="raised")
l1.pack(side="top")
e1=tk.Entry(crwn)
e1.pack(side="top")
l2=tk.Label(crwn,text="Enter opening credit:",relief="raised")
l2.pack(side="top")
e2=tk.Entry(crwn)
e2.pack(side="top")
l3=tk.Label(crwn,text="Enter desired PIN:",relief="raised")
l3.pack(side="top")
e3=tk.Entry(crwn,show="*")
e3.pack(side="top")
b=tk.Button(crwn,text="Submit",command=lambda: write(crwn,e1.get().strip(),e2.get().strip(),e3.get().strip()))
b.pack(side="top")
crwn.bind("<Return>",lambda x:write(crwn,e1.get().strip(),e2.get().strip(),e3.get().strip()))
return
def Main_Menu():
rootwn=tk.Tk()
rootwn.geometry("1600x500")
rootwn.title("UNITED Bank")
rootwn.configure(background='orange')
fr1=tk.Frame(rootwn)
fr1.pack(side="top")
bg_image = tk.PhotoImage(file ="pile1.gif")
x = tk.Label (image = bg_image)
x.place(y=-400)
l_title=tk.Message(text="SIMPLE BANKING\n SYSTEM",relief="raised",width=2000,padx=600,pady=0,fg="white",bg="black",justify="center",anchor="center")
l_title.config(font=("Courier","50","bold"))
l_title.pack(side="top")
imgc1=tk.PhotoImage(file="new.gif")
imglo=tk.PhotoImage(file="login.gif")
imgc=imgc1.subsample(2,2)
imglog=imglo.subsample(2,2)
b1=tk.Button(image=imgc,command=Create)
b1.image=imgc
b2=tk.Button(image=imglog,command=lambda: log_in(rootwn))
b2.image=imglog
img6=tk.PhotoImage(file="quit.gif")
myimg6=img6.subsample(2,2)
b6=tk.Button(image=myimg6,command=rootwn.destroy)
b6.image=myimg6
b1.place(x=800,y=300)
b2.place(x=800,y=200)
b6.place(x=920,y=400)
rootwn.mainloop()
Main_Menu()
|
[
"noreply@github.com"
] |
hsa306.noreply@github.com
|
12401c84977b036427a9104776abcfb54dae2926
|
9923e30eb99716bfc179ba2bb789dcddc28f45e6
|
/autorest/python/swagger/models/tagged_sensor.py
|
1f7a06e5448848378589dfcd960293e1d3a51f99
|
[] |
no_license
|
silverspace/samsara-sdks
|
cefcd61458ed3c3753ac5e6bf767229dd8df9485
|
c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa
|
refs/heads/master
| 2020-04-25T13:16:59.137551
| 2019-03-01T05:49:05
| 2019-03-01T05:49:05
| 172,804,041
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tagged_sensor_base import TaggedSensorBase
class TaggedSensor(TaggedSensorBase):
"""TaggedSensor.
:param id: The ID of the Sensor being tagged.
:type id: long
:param name: Name of the Sensor being tagged.
:type name: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, id, name=None):
super(TaggedSensor, self).__init__(id=id)
self.name = name
|
[
"greg@samsara.com"
] |
greg@samsara.com
|
69fd19b7e5f51b9162f599f3f476a93584fafad7
|
966e738cc124f1e648a1147340505f7de0978c6f
|
/CISTER_image_processing/devel/lib/python2.7/dist-packages/image_processing/msg/_coords.py
|
c2af9c84b1230e81f59dd46153d597f895b1324e
|
[
"Apache-2.0"
] |
permissive
|
pmssantos/copaDrive
|
1a81ee4f6ab5241f5d4643e3b6dbd73fadd4a45d
|
56439ce311d6624840ddd4813780bd0117e26b47
|
refs/heads/master
| 2023-05-05T02:40:53.283905
| 2021-05-21T23:39:10
| 2021-05-21T23:39:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,715
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from image_processing/coords.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class coords(genpy.Message):
_md5sum = "07271e516a6d3af46ff39e3801eabeb2"
_type = "image_processing/coords"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 X2
float32 X1
float32 dif_X
float32 Y2
float32 Y1
float32 dif_Y
float32 slope
float32 length
float32 intercept
"""
__slots__ = ['X2','X1','dif_X','Y2','Y1','dif_Y','slope','length','intercept']
_slot_types = ['float32','float32','float32','float32','float32','float32','float32','float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
X2,X1,dif_X,Y2,Y1,dif_Y,slope,length,intercept
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(coords, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.X2 is None:
self.X2 = 0.
if self.X1 is None:
self.X1 = 0.
if self.dif_X is None:
self.dif_X = 0.
if self.Y2 is None:
self.Y2 = 0.
if self.Y1 is None:
self.Y1 = 0.
if self.dif_Y is None:
self.dif_Y = 0.
if self.slope is None:
self.slope = 0.
if self.length is None:
self.length = 0.
if self.intercept is None:
self.intercept = 0.
else:
self.X2 = 0.
self.X1 = 0.
self.dif_X = 0.
self.Y2 = 0.
self.Y1 = 0.
self.dif_Y = 0.
self.slope = 0.
self.length = 0.
self.intercept = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_9f().pack(_x.X2, _x.X1, _x.dif_X, _x.Y2, _x.Y1, _x.dif_Y, _x.slope, _x.length, _x.intercept))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 36
(_x.X2, _x.X1, _x.dif_X, _x.Y2, _x.Y1, _x.dif_Y, _x.slope, _x.length, _x.intercept,) = _get_struct_9f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_9f().pack(_x.X2, _x.X1, _x.dif_X, _x.Y2, _x.Y1, _x.dif_Y, _x.slope, _x.length, _x.intercept))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 36
(_x.X2, _x.X1, _x.dif_X, _x.Y2, _x.Y1, _x.dif_Y, _x.slope, _x.length, _x.intercept,) = _get_struct_9f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_9f = None
def _get_struct_9f():
global _struct_9f
if _struct_9f is None:
_struct_9f = struct.Struct("<9f")
return _struct_9f
|
[
"enpvf@isep.ipp.pt"
] |
enpvf@isep.ipp.pt
|
36c83d1f224f722a5d5c6e306c21a837d0e6ab2b
|
71d7398290d3f88aab4a8afd75be0408ee677e31
|
/apps/goods/serializers.py
|
7931dc6d9341875e24a2dc7c8f7f51c11380c3f8
|
[] |
no_license
|
LIpengcccc/MxShop
|
d8e6ae34d836845b6f3d7bd74e551dbe5ad74c88
|
2f16c942ab4ac02f7e4ea2f8f56e2c30f892c5bb
|
refs/heads/master
| 2022-12-14T05:21:20.353791
| 2019-11-09T14:40:04
| 2019-11-09T14:40:04
| 220,652,926
| 0
| 0
| null | 2022-12-08T07:31:43
| 2019-11-09T14:20:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,543
|
py
|
from django.db.models import Q
from rest_framework import serializers
from .models import Goods, GoodsCategory, GoodsImage, Banner, IndexAd, GoodsCategoryBrand, HotSearchWords
class CategorySerializer3(serializers.ModelSerializer):
class Meta:
model = GoodsCategory
# fields = ('name', 'click_num', 'market_price', 'add_time')
fields = '__all__'
class CategorySerializer2(serializers.ModelSerializer):
sub_cat = CategorySerializer3(many=True)
class Meta:
model = GoodsCategory
# fields = ('name', 'click_num', 'market_price', 'add_time')
fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
sub_cat = CategorySerializer2(many=True)
class Meta:
model = GoodsCategory
# fields = ('name', 'click_num', 'market_price', 'add_time')
fields = '__all__'
class GoodsImageSerializer(serializers.ModelSerializer):
class Meta:
model = GoodsImage
fields = ("image",)
class GoodsSerializer(serializers.ModelSerializer):
category = CategorySerializer()
# "images" is the related_name set on the model; this nests the carousel images into the goods data
images = GoodsImageSerializer(many=True)
class Meta:
model = Goods
# fields = ('name', 'click_num', 'market_price', 'add_time')
fields = '__all__'
class BannerSerializer(serializers.ModelSerializer):
'''
Carousel banner images
'''
class Meta:
model = Banner
fields = "__all__"
class BrandSerializer(serializers.ModelSerializer):
'''
Promoted brands displayed under a top-level category
'''
class Meta:
model = GoodsCategoryBrand
fields = "__all__"
class IndexCategorySerializer(serializers.ModelSerializer):
# Brands for a given top-level category; one-to-many, so a category can have several
brands = BrandSerializer(many=True)
# Goods has a foreign key "category", but it points at a third-level category, so the
# goods under a top-level category cannot be fetched directly through that reverse relation
goods = serializers.SerializerMethodField()
# related_name="sub_cat" is defined on the parent_category field;
# this fetches the second-level categories
sub_cat = CategorySerializer2(many=True)
# Advertised goods
ad_goods = serializers.SerializerMethodField()
def get_ad_goods(self, obj):
goods_json = {}
ad_goods = IndexAd.objects.filter(category_id=obj.id, )
if ad_goods:
# take the first goods object from the queryset
good_ins = ad_goods[0].goods
# When calling a serializer inside another serializer, pass the request through the
# context argument; nested serializers require it. Take ".data" so the result is JSON data.
goods_json = GoodsSerializer(good_ins, many=False, context={'request': self.context['request']}).data
return goods_json
# custom getter for the goods SerializerMethodField
def get_goods(self, obj):
# match goods whose category is this one or any of its descendants (child or grandchild categories)
all_goods = Goods.objects.filter(Q(category_id=obj.id) | Q(category__parent_category_id=obj.id) | Q(
category__parent_category__parent_category_id=obj.id))
goods_serializer = GoodsSerializer(all_goods, many=True, context={'request': self.context['request']})
return goods_serializer.data
class Meta:
model = GoodsCategory
fields = "__all__"
class HotWordsSerializer(serializers.ModelSerializer):
class Meta:
model = HotSearchWords
fields = "__all__"
|
[
"1014741954@qq.com"
] |
1014741954@qq.com
|
913f9d79cbda01e06d0e01bb1575e92a1e90b1cf
|
311334ddd79553353c493e8bb7d0228ee427715f
|
/CSV_Join.py
|
ceadeb414a90069cca27440e1a08773974c28124
|
[
"MIT"
] |
permissive
|
it-fm/SRTM_to_STL
|
8e28f06f13ee2db40a3f147ebd283a23e4e3591d
|
2b29031ce6d4031118b79a83193d97e14b9cf10e
|
refs/heads/master
| 2021-09-15T14:03:35.542200
| 2018-06-03T21:07:43
| 2018-06-03T21:07:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,961
|
py
|
## Code for the CSV joiner
## Run from the command line as:
## python CSV_Join.py filename1.csv filename2.csv
## Joins two SRTM csv files of the same size into one
## The code will attempt to join non-adjacent files so long as they share lat or lon!
import numpy
import sys
if __name__ == '__main__':
try:
print 'Joining CSV files'
filename1 = ''
filename2 = ''
#filename1 = 'S35E018.csv' # Cape Town S
#filename2 = 'S34E018.csv' # Cape Town N
#filename1 = 'S35E019.csv' #
#filename2 = 'S34E019.csv' #
#filename1 = 'S35E018_join.csv' #
#filename2 = 'S35E019_join.csv' #
#filename1 = 'N54W004.csv' # UK Lake District W
#filename2 = 'N54W003.csv' # UK Lake District E
if filename1 == '' or filename2 == '':
# Check if the csv filenames were passed in argv (e.g. "python CSV_Join.py N54W004.csv N54W003.csv")
if len(sys.argv) > 2:
filename1 = sys.argv[1]
filename2 = sys.argv[2]
if filename1 == '' or filename2 == '':
# Ask for the two csv filenames
filename1 = raw_input('Enter the 1st csv filename: ')
filename2 = raw_input('Enter the 2nd csv filename: ')
# Find the starting lats & lons from the filenames
start_lon1 = float(filename1[4:7])
if filename1[3] == 'W': start_lon1 = -start_lon1
start_lat1 = float(filename1[1:3])
if filename1[0] == 'S': start_lat1 = -start_lat1
start_lon2 = float(filename2[4:7])
if filename2[3] == 'W': start_lon2 = -start_lon2
start_lat2 = float(filename2[1:3])
if filename2[0] == 'S': start_lat2 = -start_lat2
# Check if files need to be joined L-R or T-B
if start_lat1 == start_lat2:
print 'Files have equal latitude. Joining Left-Right.'
LR = True
if start_lon1 < start_lon2:
print 'File order does not need to be changed.'
else:
print 'Swapping file order.'
filename = filename1 # Swap filenames
filename1 = filename2
filename2 = filename
elif start_lon1 == start_lon2:
print 'Files have equal longitude. Joining Top-Bottom.'
LR = False
if start_lat1 < start_lat2:
print 'File order does not need to be changed.'
else:
print 'Swapping file order.'
filename = filename1 # Swap filenames
filename1 = filename2
filename2 = filename
else:
raise Exception('Files do not have same lat or lon!')
outfile = str(filename1[:-4] + '_join' + filename1[-4:])
print 'Processing',filename1,'and',filename2
print 'Outputting data to',outfile
try:
# read data from file1 as float
east1,north1,hgt1 = numpy.loadtxt(filename1,delimiter=',',unpack=True)
except:
raise Exception('Invalid file!')
width1 = int(east1[0]) # Get the width of file1
height1 = int(north1[0]) # Get the height of file1
hgt_max1 = int(hgt1[0]) # Get the max_height of file1
print filename1,': Width',width1,'Height',height1,'Max_Height',hgt_max1
east1 = east1[1:] # Discard the width
north1 = north1[1:] # Discard the height
hgt1 = hgt1[1:] # Discard the max_height
points1 = len(hgt1) # Check the number of points is correct
if points1 != width1 * height1:
raise Exception('Invalid file!')
# Reshape file1 data into Y,X format
east1 = numpy.reshape(east1,(height1,-1))
north1 = numpy.reshape(north1,(height1,-1))
hgt1 = numpy.reshape(hgt1,(height1,-1))
try:
# read data from file2 as float
east2,north2,hgt2 = numpy.loadtxt(filename2,delimiter=',',unpack=True)
except:
raise Exception('Invalid file!')
width2 = int(east2[0]) # Get the width of file2
height2 = int(north2[0]) # Get the height of file2
hgt_max2 = int(hgt2[0]) # Get the max_height of file2
print filename2,': Width',width2,'Height',height2,'Max_Height',hgt_max2
east2 = east2[1:] # Discard the width
north2 = north2[1:] # Discard the height
hgt2 = hgt2[1:] # Discard the max_height
points2 = len(hgt2) # Check the number of points is correct
if points2 != width2 * height2:
raise Exception('Invalid file!')
# Reshape file2 data into Y,X format
east2 = numpy.reshape(east2,(height2,-1))
north2 = numpy.reshape(north2,(height2,-1))
hgt2 = numpy.reshape(hgt2,(height2,-1))
# Check files share either width or height
if ((width1 != width2) and (height1 != height2)):
raise Exception('Files do not share width or height!')
if LR:
# remove duplicated column
hgt1 = hgt1[:,:-1]
east1 = east1[:,:-1]
north1 = north1[:,:-1]
width1 -= 1
points1 -= height1
# join the data
hgt = numpy.concatenate((hgt1,hgt2),1)
east = numpy.concatenate((east1,east2),1)
north = numpy.concatenate((north1,north2),1)
width = width1 + width2
height = height1
points = points1 + points2
hgt_max = hgt.max()
else:
# remove duplicated row
hgt1 = hgt1[1:,:]
east1 = east1[1:,:]
north1 = north1[1:,:]
height1 -= 1
points1 -= width1
# join the data
hgt = numpy.concatenate((hgt2,hgt1),0)
east = numpy.concatenate((east2,east1),0)
north = numpy.concatenate((north2,north1),0)
width = width1
height = height1 + height2
points = points1 + points2
hgt_max = hgt.max()
# Convert back to a 1D array
hgt = numpy.ravel(hgt)
east = numpy.ravel(east)
north = numpy.ravel(north)
print 'Width',width
print 'Height',height
print 'Max Height',hgt_max
print 'Top Left:',int(east[0]),int(north[0])
print 'Bottom Right:',int(east[points-1]),int(north[points-1])
# Save the joined data
print 'Saving to',outfile
fp = open(outfile,'w')
outstr = str(width) + ',' + str(height) + ',' + str(int(hgt_max)) + '\n'
fp.write(outstr)
for l in range(points):
outstr = str(int(east[l])) + ',' + str(int(north[l])) + ',' + str(int(hgt[l])) + '\n'
fp.write(outstr)
fp.close()
print 'Complete!'
except KeyboardInterrupt:
print 'CTRL+C received...'
finally:
print 'Bye!'
|
[
"pc235603@gmail.com"
] |
pc235603@gmail.com
|
3b2c176863a5e194f22e386c5b7f32f37633bd8a
|
c2806d4c1044d4e26b46127bb5d9bdf6e8e42d0a
|
/hardware/hameg.py
|
a65aa91f9ca4523543a634dbd45b6d6d0d4c8997
|
[] |
no_license
|
Faridelnik/Pi3Diamond
|
f7876cc878793f13037b3007bd432fd9e62b0eb9
|
62e05ccb365d7f0b8c52aaa7be3723dd16d83689
|
refs/heads/master
| 2021-01-22T12:38:08.801473
| 2017-10-10T08:37:13
| 2017-10-10T08:37:13
| 102,353,439
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,393
|
py
|
'''
Created on 20.04.2012
author: Helmut Fedder
'''
import time
import numpy as np
# helper class to represent a visa instrument via a socket
class SocketInstrument():
def __init__(self, device):
import socket
host,port = device.split(':')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, int(port))) # port is parsed from the device string
self.sock = sock
def write(self, cmd):
"""Sends a command over the socket"""
cmd_string = cmd + '\n'
        sent = self.sock.sendall(cmd_string)
        if sent is not None:
            raise RuntimeError('Transmission failed')
        time.sleep(.1) # give the transfer time to complete; should be replaced by proper error handling at some point
def ask(self,question):
"""sends the question and receives the answer"""
self.write(question)
answer = self.sock.recv(2048)#2000
return answer[:-2]
def close(self):
self.sock.close()
class HMP2030():
def __init__(self,device, voltage_max=20.0, current_max=2.0, fuse_voltage_max=20.0, fuse_current_max=2.5):
"""
Provides communication with a HMP2030 power supply
via USB (virtual COM) or LAN.
Usage Examples:
hmp = HMP2030('ASRL11::INSTR')
hmp = HMP2030('192.168.0.11:50000')
Parameters:
device: string that describes the device (see examples above)
Optional Parameters:
voltage_max: maximum allowed voltage
current_max: maximum allowed current
fuse_voltage_max: maximum allowed fuse voltage
fuse_current_max: maximum allowed fuse current
"""
if '::' in device:
self._connect_serial(device)
else:
self._connect_lan(device)
self.voltage_max=voltage_max
self.current_max=current_max
self.fuse_voltage_max=fuse_voltage_max
self.fuse_current_max=fuse_current_max
def _connect_serial(self, device):
        import visa
        instr = visa.instrument(device)
        instr.term_chars = '\n'
        instr.chunk_size = 4096
        instr.timeout = 1.0
        self.instr = instr
def _connect_lan(self, device):
"""connects to the hameg powersupply"""
self.instr=SocketInstrument(device)
# convenience method
def set_output(self,channel,current):
"""Set the current on the given channel. Turn output on or off depending on the specified current."""
        self.set_ch(channel)
        if current is None or current <= 0:
            self.stop()
        else:
            self.set_current(current)
            self.run()
#functions to perform different SCPI-commands
    def set_ch(self,ch):
        """sets the channel 1, 2 or 3"""
        if ch in [1,2,3]:
            # INST:NSEL matches the query used in get_ch below
            self.instr.write('INST:NSEL ' + str(ch))
        else:
            raise ValueError('Wrong channel number. Choose 1, 2 or 3.')
def get_ch(self):
"""asks for the selected channel"""
channel = int(self.instr.ask('INST:NSEL?'))
return channel
    def status(self,ch):
        """gets the current status of the selected channel (CC or CV)"""
        state = int(self.instr.ask('STAT:QUES:INST:ISUM' + str(ch) + ':COND?'))
        if state == 1:
            return 'CC'
        elif state == 2:
            return 'CV'
        else:
            print "Couldn't read the status of the selected channel."
def set_voltage(self,volt):
"""sets the voltage to the desired value"""
if volt < 0:
print 'The selected voltage cannot be set.'
        elif volt > self.voltage_max: # clamp to voltage_max if volt exceeds it
            self.instr.write('VOLT %1.3f' %self.voltage_max)
            print 'The set voltage exceeds the maximum voltage: %1.3f' %self.voltage_max
else:
self.instr.write('VOLT %1.3f' %volt)
    def set_voltage_step(self,vstep):
        """increases the voltage by a desired step"""
        vset = self.get_voltage()
        self.set_voltage(vset + vstep)
def get_voltage(self):
"""measures the voltage"""
voltage = float(self.instr.ask('MEAS:VOLT?'))
return voltage
def set_current(self,curr):
"""sets the current to the desired value"""
if curr < 0:
print 'The selected current cannot be set.'
        elif curr > self.current_max : # clamp to current_max if curr exceeds it
            self.instr.write('CURR %1.3f' %self.current_max)
            print 'The set current exceeds the maximum current: %1.3f' %self.current_max
else:
self.instr.write('CURR %1.3f' %curr)
    def set_current_step(self,cstep):
        """increases the current by a desired step"""
        cset = self.get_current()
        self.set_current(cset + cstep)
def get_current(self):
"""measures the current"""
current = float(self.instr.ask('MEAS:CURR?'))
return current
def set_arbitrary(self,ch, seq, N):
"""performs a sequence of voltage and current values for a given time on one channel with a number of repetitions.
ch: channel for output
seq: sequence to be set in form of a nested list = [(voltage,current,time),(..),(..),...]
N: number of repetitions [1..255]. 0 means infinite repetitions."""
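        # Example (hypothetical values): two 1-second steps on channel 1,
        # repeated 5 times:
        #   hmp.set_arbitrary(1, [(1.0, 0.5, 1.0), (2.0, 0.5, 1.0)], 5)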
        seq_ary = np.array(seq)
        if max(seq_ary[:,0]) > self.voltage_max:
            print 'The set voltage exceeds the maximum voltage: %1.3f' %self.voltage_max
        elif max(seq_ary[:,1]) > self.current_max:
            print 'The set current exceeds the maximum current: %1.3f' %self.current_max
        elif min(seq_ary[:,2]) < .5:
            print 'The set time is shorter than 0.5s.'
        elif (seq_ary < 0).any():
            print 'Negative value of voltage, current or time.'
        elif ch not in (1,2,3):
            print 'Wrong channel number. Choose 1, 2 or 3.'
        elif N not in range(0,256):
            print 'The set repetitions are outside the range [0,255].'
        else:
            self.instr.write('ARB:DATA' + ' ' + str(seq).translate(None, '[()] '))
            self.instr.write('ARB:REP' + ' ' + str(N))
            self.instr.write('ARB:TRANS' + ' ' + str(ch))
            self.instr.write('ARB:STAR' + ' ' + str(ch))
            self.set_ch(ch)
            self.run()
def stop_arbitrary(self,ch):
"""stops the arbitrary sequence of a specified channel ch, but leafs the output on."""
self.instr.write('ARB:STOP' + ' ' + str(ch))
    def get_arbitrary(self,ch):
        """gets the number of performed repetitions of the arbitrary sequence"""
        self.set_ch(ch)
        num = int(self.instr.ask('ARB:REP?'))
        return num
    def get_all(self):
        """gets the measured values for all channels in the form [(ch,V,A),]"""
        l = []
        for i in [1,2,3]:
            self.set_ch(i)
            vset = self.get_voltage()
            cset = self.get_current()
            l.append((i,vset,cset))
        return l
def run(self):
"""turns the output from the chosen channel on"""
self.instr.write('OUTP ON')
    def run_all(self):
        """turns the output from all channels on"""
        self.set_ch(1)
        self.instr.write('OUTP:SEL ON')
        self.set_ch(2)
        self.instr.write('OUTP:SEL ON')
        self.set_ch(3)
        self.instr.write('OUTP:SEL ON')
        self.instr.write('OUTP:GEN ON')
def stop(self):
"""turns the output from the chosen channel off"""
self.instr.write('OUTP OFF')
    def stop_all(self):
        """stops the output of all channels"""
        self.set_ch(1)
        self.instr.write('OUTP:SEL OFF')
        self.set_ch(2)
        self.instr.write('OUTP:SEL OFF')
        self.set_ch(3)
        self.instr.write('OUTP:SEL OFF')
        self.instr.write('OUTP:GEN OFF')
def start(self):
"""starts up the whole system"""
self.instr.write('*RST') #resets the device
self.instr.write('SYST:REM') #sets the instrument to remote control
    def close(self):
        """stops and disconnects the device"""
        self.stop_all()
        self.instr.close()
def beep(self):
"""gives an acoustical signal from the device"""
self.instr.write('SYST:BEEP')
def error_list(self):
"""prints all errors from the error register."""
error = str(self.instr.ask('SYST:ERR?'))
return error
def OVP(self,fuse_voltage_max):
"""sets the Over-Voltage-Protection to the value fuse_voltage_max for a selected channel"""
if fuse_voltage_max < 0:
print 'The selected value for voltage protection cannot be set.'
        elif fuse_voltage_max > 32.0: # the maximum voltage the HMP2030 supplies
            print 'The set voltage exceeds the maximum voltage: 32V'
else:
self.instr.write('VOLT:PROT %1.3f' %fuse_voltage_max)
def FUSE(self,fuse_current_max):
"""sets the fuse to the value fuse_current_max and the delay time to 0ms for a selected channel"""
self.instr.write('FUSE ON')
if fuse_current_max < 0:
print 'The selected value for current fuse cannot be set.'
        elif fuse_current_max > 5.0: # the maximum current the HMP2030 supplies
            print 'The set current exceeds the maximum current: 5A'
else:
self.instr.write('CURR %1.3f' %fuse_current_max)
self.instr.write('FUSE:DEL 0')
class BipolarHMP2030(HMP2030):
#funktions to reverse the polarity
def set_polarity(self,ch,p):
"""sets the polarity p of a given channel"""
pass
def get_polarity(self,ch):
"""gets the polarity p of a given channel"""
pass
#-------------------------------------------------------------------------------------------------------------------------
#define the sub-function including:set_channel, set_voltage, set_current and run
#def set(ch):
# write('SYST REM')
# write('INST OUPT1')
# write('OUTP OFF')
#
#write('SYST REM') #to remote control
#write('INST OUPT1') #select channel 1
#stop()
|
[
"shagieva.farida@physics.msu.ru"
] |
shagieva.farida@physics.msu.ru
|
978c93ab7716a070e4b5c045ab3c4fac5825d8e4
|
7bbe7bf766dcf2bdf6b129ece0b78b5406a93adc
|
/python/20.py
|
0642a43dc2916f147a36bcea87a04511465a5e1b
|
[] |
no_license
|
ottiferous/euler
|
8261c17139374777e14c17748e6e5ac9e8968aa2
|
4eebf05941fdada5c4c6b605864d61ea386e7f1b
|
refs/heads/master
| 2021-01-18T21:33:16.732445
| 2016-05-24T23:55:48
| 2016-05-24T23:55:48
| 2,253,811
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
result = 0
# Project Euler 20 asks for the digit sum of 100!; xrange's upper bound is
# exclusive, so 101 is needed to include the factor 100.
for x in str(reduce(lambda x,y: x*y, xrange(1, 101), 1)):
    result += int(x)
print result
|
[
"github@andrewmarrone.com"
] |
github@andrewmarrone.com
|
01ea9b89c21c2662978b5f1fcd58afc9ebd35ada
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnsingula.py
|
2760b64a1532a4d213c8fae350fa41d153d1278c
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 104
|
py
|
ii = [('WilkJMC2.py', 4), ('GilmCRS.py', 1), ('WestJIT2.py', 2), ('DibdTRL2.py', 1), ('MereHHB3.py', 1)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
06079a88e0315ff4a3ca5c78262cba05ed302bef
|
279f46617264ffa386b600f05ddf9e15df41b596
|
/ch7_funtinal_api.py
|
2bb49895adb3e004f7c285fdb1aedbacc63e831e
|
[] |
no_license
|
oakareahio/deeplearning_with_python
|
98636831bd6e84046d8fb7548390acc9221d9ba7
|
1248370b9bb4a530908110b1f289ae5eaa77b422
|
refs/heads/master
| 2022-12-07T11:43:13.671594
| 2019-09-14T06:02:51
| 2019-09-14T06:02:51
| 208,393,641
| 0
| 0
| null | 2022-11-22T04:14:40
| 2019-09-14T05:27:02
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
from keras import Input, layers
from keras.models import Model
input_tensor = Input(shape=(64,))
x = layers.Dense(32, activation='relu')(input_tensor)
x = layers.Dense(32, activation='relu')(x)
output_tensor = layers.Dense(10, activation='softmax')(x)
model = Model(input_tensor, output_tensor)
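# The functional API builds the graph by calling layer instances on tensors;
# Model(input_tensor, output_tensor) then wraps everything in between.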
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
print(model.summary())
import numpy as np
x_train = np.random.random((1000, 64))
y_train = np.random.random((1000, 10))
model.fit(x_train, y_train, epochs=10, batch_size=128)
score = model.evaluate(x_train, y_train)
|
[
"okehara_aoi@microad.co.jp"
] |
okehara_aoi@microad.co.jp
|
42cbab2e55f7dc9e5fe0dd1ae6e8bc8112d0ad8a
|
f9ff745b7d825e685a196b1e24ccd14d9d744b6a
|
/args.py
|
ac8fa6ba56e2a1250e54b603a396f5a9913fb4f0
|
[] |
no_license
|
lv10wizard/ig-highlights-bot
|
ce061275acaca9108bd27da3017ec492832d3934
|
e551d68f51f222c58428029df1eda7838825101f
|
refs/heads/master
| 2021-05-16T05:38:38.934210
| 2018-04-07T03:34:22
| 2018-04-07T03:34:22
| 103,170,792
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,326
|
py
|
from __future__ import print_function
import argparse
import os
import re
from six import iteritems
from six.moves import input
from constants import (
AUTHOR,
DATA_ROOT_DIR,
)
from src import (
config,
reddit,
)
from src.database import (
BadUsernamesDatabase,
Database,
get_class_from_name,
InstagramDatabase,
SUBCLASSES,
SubredditsDatabase,
UniqueConstraintFailed,
)
from src.util import (
confirm,
logger,
mkdirs,
)
DRY_RUN = 'dry-run'
SHUTDOWN = 'shutdown'
BACKUP = 'backup'
LOAD_BACKUP = 'load-backup'
ADD_SUBREDDIT = 'add-subreddit'
RM_SUBREDDIT = 'rm-subreddit'
ADD_BLACKLIST = 'add-blacklist'
RM_BLACKLIST = 'rm-blacklist'
ADD_BAD_USERNAME = 'add-bad-username'
RM_BAD_USERNAME = 'rm-bad-username'
DELETE_DATA = 'delete-data'
DUMP = 'dump'
IG_DB = 'ig-db'
IG_DB_LIKES = 'ig-db-likes'
IG_DB_COMMENTS = 'ig-db-comments'
IG_DB_LINKS_RAW = 'ig-db-links-raw'
IG_CHOICES = 'ig-db-choices'
DATABASE_CHOICES = sorted(list(SUBCLASSES.keys()))
try:
# disallow InstagramDatabase from --dump choices since they are handled
# separately
DATABASE_CHOICES.remove('InstagramDatabase')
except ValueError:
# database class renamed? this shouldn't happen
pass
BACKUP_CHOICES = [
name for name in DATABASE_CHOICES
# don't bother backing up the ratelimit database
if name != 'InstagramRateLimitDatabase'
]
igdb_path = Database.format_path(InstagramDatabase.PATH)
resolved_igdb_path = config.resolve_path(igdb_path)
try:
IG_DB_CHOICES = [
name for name in os.listdir(resolved_igdb_path)
if name.endswith('.db')
]
except OSError:
IG_DB_CHOICES = []
IG_DB_DISPLAY_CHOICES = []
else:
IG_DB_DISPLAY_CHOICES = sorted([name[:-3] for name in IG_DB_CHOICES])
IG_DB_DISPLAY_CHOICES += ['*']
def to_opt_str(arg_str):
return arg_str.replace('-', '_')
def to_cmdline(arg_str):
return arg_str.replace('_', '-')
def shutdown(cfg, do_shutdown=True):
import sys
if sys.platform == 'win32':
from signal import CTRL_C_EVENT as SIGINT
else:
from signal import SIGINT
from src.mixins.proc import get_pid_file
fail_msg = 'Could not determine bot\'s pid (is the bot running?)'
bot_pid_file = get_pid_file('IgHighlightsBot')
if not bot_pid_file:
logger.info(fail_msg)
return
try:
with open(bot_pid_file, 'r') as fd:
main_pid = fd.read()
except (IOError, OSError):
logger.exception(fail_msg)
return
try:
main_pid = int(main_pid)
except (TypeError, ValueError):
msg = [fail_msg, '{path} contents: \'{content}\'']
logger.exception('\n'.join(msg),
path=bot_pid_file,
content=main_pid,
)
return
if confirm('Shutdown bot (pid={0})?'.format(main_pid)):
logger.info('Shutting down bot ({color_pid}) ...', color_pid=main_pid)
try:
os.kill(main_pid, SIGINT)
except OSError:
logger.exception('Could not shutdown the bot ({color_pid})!',
color_pid=main_pid,
)
else:
logger.info('Leaving the bot alive ({color_pid})', color_pid=main_pid)
def backup(cfg, *databases):
import sqlite3
if '*' in databases:
databases = BACKUP_CHOICES
for db_name in databases:
db_class = get_class_from_name(db_name)
if not db_class:
continue
path = Database.format_path(db_class.PATH)
resolved_path = Database.resolve_path(path)
if os.path.exists(resolved_path):
backup_path = Database.format_backup_path(
db_class.PATH
)
resolved_backup = Database.resolve_path(backup_path)
mkdirs(os.path.dirname(resolved_backup))
logger.info('Backing up \'{db_name}\' ({basename})'
' to \'{backup_path}\' ...',
db_name=db_name,
basename=os.path.basename(path),
backup_path=backup_path,
)
connection = sqlite3.connect(resolved_path)
try:
with open(resolved_backup, 'w') as fd:
for line in connection.iterdump():
print(line, end=os.linesep, file=fd)
except (IOError, OSError):
logger.exception('Failed to backup \'{db_name}\' to SQL'
' text format!',
db_name=db_name,
)
finally:
logger.info('Successfully backed up \'{db_name}\' to'
' \'{backup_path}\'!',
db_name=db_name,
backup_path=backup_path,
)
connection.close()
else:
logger.info('Cannot backup \'{db_name}\': no database file'
' \'{path}\'',
db_name=db_name,
path=path,
)
def _load_backup(db_path, sql_script):
"""
Loads the database @ db_path with the specified sql_script string.
** This will wipe the existing database **
db_path (str) - the path to the database to load
sql_script (str) - the SQL script to populate the database with
(see sqlite3#executescript)
Returns True if the database is successfully loaded from the script
"""
import sqlite3
success = False
do_load = True
resolved_path = Database.resolve_path(db_path)
# keep the database file in case something goes wrong
# XXX: this isn't 100% safe because it may move the file while it is in
# an incomplete state, potentially corrupting it. it is however better than
# simply deleting the database file outright.
if os.path.exists(resolved_path):
tmp_path = '{0}.tmp'.format(resolved_path)
logger.debug('Moving \'{old}\' -> \'{tmp}\'',
old=db_path,
tmp=tmp_path,
)
try:
os.rename(resolved_path, tmp_path)
except OSError:
logger.exception('Failed to move old database file \'{path}\'!',
path=db_path,
)
do_load = False
if do_load:
mkdirs(os.path.dirname(resolved_path))
connection = sqlite3.connect(resolved_path)
try:
with connection:
connection.executescript(sql_script)
except:
logger.exception('Failed to load \'{db_path}\' from backup!',
db_path=db_path,
)
logger.info('Reverting ...')
try:
# XXX: remove first in case .rename doesn't clobber
if os.path.exists(resolved_path):
os.remove(resolved_path)
os.rename(tmp_path, resolved_path)
except OSError:
logger.exception('Failed to revert \'{tmp}\' -> \'{old}\'!',
tmp=tmp_path,
old=db_path,
)
else:
logger.debug('Removing \'{tmp}\' ...', tmp=tmp_path)
try:
os.remove(tmp_path)
except OSError:
# littering: load success but the previous database file
# wasn't cleaned up properly
logger.warn('Failed to remove previous database file'
' \'{tmp}\'',
tmp=tmp_path,
exc_info=True,
)
success = True
return success
def load_backup(cfg, *databases):
import time
if '*' in databases:
databases = BACKUP_CHOICES
logger.info('** Loading from backup will wipe any changes in the'
' database **\n')
for db_name in databases:
db_class = get_class_from_name(db_name)
if not db_class:
continue
backup_path = Database.format_backup_path(db_class.PATH)
resolved_backup = Database.resolve_path(backup_path)
if os.path.exists(resolved_backup):
try:
backup_mtime = os.path.getmtime(resolved_backup)
except (IOError, OSError):
logger.debug('Failed to stat \'{path}\' (for mtime)',
path=backup_path,
)
backup_mtime = -1
confirm_msg = ['Load \'{0}\'?']
if backup_mtime > 0:
confirm_msg.append('(backup last modified @ {1})')
confirm_msg = ' '.join(confirm_msg).format(
db_name,
time.strftime(
'%m/%d, %H:%M:%S', time.localtime(backup_mtime)
),
)
if confirm(confirm_msg):
try:
with open(resolved_backup, 'r') as fd:
sql = [line for line in fd if line]
except (IOError, OSError):
logger.exception('Failed to read \'{db_name}\' backup'
' ({path})!',
db_name=db_name,
path=backup_path,
)
else:
path = Database.format_path(db_class.PATH)
if _load_backup(path, ''.join(sql)):
logger.info('Successfully loaded \'{db_name}\''
' ({basename}) from \'{backup_path}\'',
db_name=db_name,
basename=os.path.basename(path),
backup_path=backup_path,
)
else:
logger.info('Cannot load \'{db_name}\': no backup found ({path})',
db_name=db_name,
path=backup_path,
)
def add_subreddit(cfg, *subreddits):
subreddits_db = SubredditsDatabase(do_seed=False)
for sub in subreddits:
_, sub_name = reddit.split_prefixed_name(sub)
# in case the user passed something like '/u/'
if sub_name:
if sub_name not in subreddits_db:
try:
with subreddits_db:
subreddits_db.insert(sub_name)
except UniqueConstraintFailed:
# this means there is a bug in __contains__
logger.warn('Failed to add \'{sub_name}\' (already added)!',
sub_name=reddit.prefix_subreddit(sub_name),
exc_info=True,
)
else:
logger.info('Successfully added \'{sub_name}\'!',
sub_name=reddit.prefix_subreddit(sub_name),
)
else:
logger.info('Cannot add \'{sub_name}\': already added!',
sub_name=sub_name,
)
def rm_subreddit(cfg, *subreddits):
subreddits_db = SubredditsDatabase(do_seed=False)
for sub in subreddits:
_, sub_name = reddit.split_prefixed_name(sub)
# in case the user passed something like '/u/'
if sub_name:
if sub_name in subreddits_db:
with subreddits_db:
subreddits_db.delete(sub_name)
logger.info('Successfully removed \'{sub_name}\'!',
sub_name=reddit.prefix_subreddit(sub_name),
)
else:
logger.info('Cannot remove \'{sub_name}\': not in database!',
sub_name=sub_name,
)
def add_blacklist(cfg, *names):
from src.blacklist import Blacklist
blacklist = Blacklist(cfg)
for name in names:
blacklist.add(name)
def rm_blacklist(cfg, *names):
from src.blacklist import Blacklist
blacklist = Blacklist(cfg)
for name in names:
blacklist.remove(name)
def add_bad_username(cfg, text, fullname, score):
# TODO? look up submission thing prefix dynamically
submission_prefix = 't3_'
if not fullname.startswith(submission_prefix):
logger.info('Usage: --{add_bad_username_opt} TEXT FULLNAME SCORE',
add_bad_username_opt=ADD_BAD_USERNAME,
)
logger.info('FULLNAME must be a submission fullname (starting with'
' {prefix}, not {fullname}): not adding \'{text}\'',
prefix=submission_prefix,
fullname=fullname,
text=text,
)
return
bad_usernames = BadUsernamesDatabase()
logger.info('Adding \'{color_text}\' ({fullname}, {score}) as a'
' bad-username ...',
color_text=text,
fullname=fullname,
score=score,
)
try:
with bad_usernames:
bad_usernames.insert(text, fullname, score)
except UniqueConstraintFailed:
logger.info('\'{color_text}\' already considered a bad-username',
color_text=text,
)
else:
logger.info('Successfully added \'{color_text}\' as a bad-username!',
color_text=text,
)
def rm_bad_username(cfg, *text):
bad_usernames = BadUsernamesDatabase()
for t in text:
if t in bad_usernames:
logger.info('Removing bad-username: \'{color_text}\' ...',
color_text=t,
)
with bad_usernames:
bad_usernames.delete(t)
else:
logger.info('\'{color_text}\' was not considered a bad-username!',
color_text=t,
)
def delete_data(cfg, do_delete=True):
import shutil
import stat
if not do_delete:
return
# assumption: all data is stored under a single directory
base_path = DATA_ROOT_DIR
resolved_path = config.resolve_path(base_path)
if not os.path.exists(resolved_path):
logger.info('No program data found in \'{0}\'', resolved_path)
return
if confirm('Delete all data in \'{0}\'?'.format(base_path)):
logger.info('Deleting all data ...')
def onerr(func, path, exc):
"""
https://docs.python.org/3/library/shutil.html#rmtree-example
"""
# qualify the func name so that we get a better sense of which
# function was called
funcname = []
try:
funcname.append(func.__module__)
except AttributeError:
# can this happen?
pass
funcname.append(func.__name__)
            logger.debug('An error occurred calling {funcname}({path}) !',
funcname='.'.join(funcname),
path=path,
exc_info=True,
)
if not os.access(path, os.W_OK):
logger.debug('Attempting to clear readonly bit ...')
try:
os.chmod(path, stat.S_IWRITE)
func(path)
except (IOError, OSError):
logger.warn('Could not remove \'{path}\'!',
path=path,
exc_info=True,
)
shutil.rmtree(resolved_path, onerror=onerr)
else:
logger.info('Leaving data as is.')
def do_print_database(path, query=''):
import sqlite3
logger.info('Dumping \'{path}\' ...', path=path)
connection = sqlite3.connect(path)
connection.row_factory = sqlite3.Row
try:
# https://stackoverflow.com/a/305639
tables = connection.execute(
'SELECT name FROM sqlite_master WHERE type = \'table\''
)
except sqlite3.DatabaseError as e:
# eg. not a database
logger.exception('Could not lookup tables in \'{path}\'',
path=path,
)
else:
print('{0}:'.format(os.path.basename(path)), end='\n\n')
# https://stackoverflow.com/a/13335514
# [('foo',), ('bar',), ('baz',)]
# -> ('foo', 'bar', 'baz')
tables = [name[0] for name in tables]
cursors = {
name: connection.execute(
'SELECT * FROM {0} {1}'.format(name, query)
)
for name in tables
}
sep = ' | '
horiz_sep = '-' * 72
end = 'number of rows:'
for name in cursors:
print(horiz_sep)
print('table \'{0}\':'.format(name))
print(horiz_sep)
cur = cursors[name]
num = 0
keys = []
padding = {}
for row in cur:
if not keys:
keys = row.keys()
columns = []
for k in keys:
# XXX: assume the first row is emblematic of the width
# of each column to avoid reading the entire database
# into memory (this may mean that some rows are not
# formatted correctly)
padding[k] = max(len(str(row[k])), len(k))
columns.append('{0:^{1}}'.format(k, padding[k]))
# print out the columns
print(*columns, sep=sep)
print(horiz_sep)
# print out each row
row_str = [
'{0:<{1}}'.format(str(row[k]), padding[k])
for k in keys
]
print(*row_str, sep=sep)
num += 1
end_out_len = len(end) + len(str(num)) + 1 # +1 for space
end_sep = ' ' + horiz_sep[end_out_len + 1:] # +1 for space
print(end, num, end=end_sep + '\n\n')
def print_database(cfg, *databases):
if '*' in databases:
# dump all databases
databases = DATABASE_CHOICES
for db_name in databases:
if db_name == 'InstagramDatabase':
logger.info('Please use --{opt} to dump individual instagram'
' databases',
opt=IG_DB,
)
continue
db_class = get_class_from_name(db_name)
if not db_class:
continue
path = Database.format_path(db_class.PATH)
resolved_path = Database.resolve_path(path)
if os.path.exists(resolved_path):
do_print_database(resolved_path)
else:
logger.info('No database file: \'{path}\'', path=path)
def print_instagram_database_wrapper(callback, order, *user_databases):
if '*' in user_databases:
# dump all instagram databases
user_databases = IG_DB_CHOICES
orig_order = order
for user_db in user_databases:
if not user_db.endswith('.db'):
user_db = '{0}.db'.format(user_db)
path = os.path.join(resolved_igdb_path, user_db)
if os.path.exists(path):
if not orig_order:
# use the default order if none was specified
igdb = InstagramDatabase(path)
if igdb.size() == 0:
igdb.close()
logger.info('Removing \'{path}\': empty database ...',
path=path,
)
try:
os.remove(path)
except (IOError, OSError):
logger.exception('Failed to remove \'{path}\'!',
path=path,
)
continue
order = 'ORDER BY {0}'.format(igdb.order_string)
logger.debug(order)
callback(path, order)
else:
path_raw = os.path.join(InstagramDatabase.PATH, user_db)
logger.info('No instagram data for user: \'{user}\'',
user=re.sub(r'[.]db$', '', user_db),
)
def print_instagram_database(cfg, order, *user_databases):
print_instagram_database_wrapper(do_print_database, order, *user_databases)
def print_instagram_database_links(cfg, order, *user_databases):
def do_print_links(path, order):
import sqlite3
from src.instagram.constants import MEDIA_LINK_FMT
if os.path.exists(path):
db = sqlite3.connect(path)
db.row_factory = sqlite3.Row
cursor = db.execute('SELECT code FROM cache {0}'.format(order))
for row in cursor:
print(MEDIA_LINK_FMT.format(row['code']))
print_instagram_database_wrapper(do_print_links, order, *user_databases)
def print_igdb_choices(cfg, do_print=True):
if not do_print:
return
print('--{0} choices:'.format(IG_DB))
line = []
sep = ', '
first_char = None
for c in IG_DB_DISPLAY_CHOICES:
formatted_line = sep.join(line)
c_first = c[0].lower()
if first_char != c_first:
# separate the choices by the first letter
end = ''
if formatted_line:
end = sep
end += '\n\n'
print(formatted_line, end=end)
first_char = c_first
line = [c]
# '..., <c>, ' => 2 * len(sep)
elif len(formatted_line) + 2*len(sep) + len(c) >= 80:
print(formatted_line, end=sep+'\n')
line = [c]
else:
line.append(c)
if line:
# print the trailing database choices
print(sep.join(line))
def handle(cfg, args):
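    # Dispatch each parsed command-line option to its handler; returns True
    # if at least one handleable option was passed.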
handlers = {
SHUTDOWN: shutdown,
BACKUP: backup,
LOAD_BACKUP: load_backup,
ADD_SUBREDDIT: add_subreddit,
RM_SUBREDDIT: rm_subreddit,
ADD_BLACKLIST: add_blacklist,
RM_BLACKLIST: rm_blacklist,
ADD_BAD_USERNAME: add_bad_username,
RM_BAD_USERNAME: rm_bad_username,
DELETE_DATA: delete_data,
DUMP: print_database,
IG_DB: print_instagram_database,
IG_DB_LIKES: print_instagram_database,
IG_DB_COMMENTS: print_instagram_database,
IG_DB_LINKS_RAW: print_instagram_database_links,
IG_CHOICES: print_igdb_choices,
}
order = {
IG_DB: None,
IG_DB_LIKES: 'ORDER BY num_likes DESC',
IG_DB_COMMENTS: 'ORDER BY num_comments DESC',
IG_DB_LINKS_RAW: None,
}
had_handleable_opt = False
for opt, opt_val in iteritems(args):
opt_key = to_cmdline(opt)
# XXX: options should evaluate to true if they are to be handled
if opt_key in handlers and bool(opt_val):
had_handleable_opt = True
try:
handler_func = handlers[opt_key]
except KeyError as e:
logger.exception('No option handler defined for \'{opt}\'!',
opt=opt,
)
else:
try:
if opt_key in order:
handler_func(cfg, order[opt_key], *opt_val)
else:
handler_func(cfg, *opt_val)
except TypeError:
# opt_val not iterable
handler_func(cfg, opt_val)
return had_handleable_opt
def parse():
parser = argparse.ArgumentParser(
description='Instagram Highlights Bot, a reddit bot that will reply'
' to comments linking to instagram accounts with their most popular'
' media.'
)
parser.add_argument('-c', '--config', metavar='PATH',
help='Custom config path; default: {0}'.format(config.Config.PATH),
)
parser.add_argument('-d', '--{0}'.format(DRY_RUN), action='store_true',
help='Runs the bot normally but disables it from replying to'
' comments and posts. This mode is intended as a sort of "live"'
' test.'
)
parser.add_argument('-P', '--logging-path', metavar='PATH',
help='Set the root directory to save logs to (this overrides the'
' config setting).',
)
parser.add_argument('-L', '--logging-level',
choices=[
logger.DEBUG, 'DEBUG',
logger.INFO, 'INFO',
logger.WARNING, 'WARNING',
logger.ERROR, 'ERROR',
logger.CRITICAL, 'CRITICAL',
],
help='Set the logging level (this overrides the config setting).',
)
parser.add_argument('-N', '--logging-no-color', action='store_true',
help='Turn off logging colors (this overrides the config setting).',
)
parser.add_argument('--{0}'.format(SHUTDOWN), action='store_true',
help='Kills the bot process and all sub-processes.',
)
parser.add_argument('--{0}'.format(ADD_SUBREDDIT),
metavar='SUBREDDIT', nargs='+',
help='Add subreddit(s) to the comment stream (these are subreddits'
' that the bot crawls).',
)
parser.add_argument('--{0}'.format(RM_SUBREDDIT),
metavar='SUBREDDIT', nargs='+',
help='Remove subreddit(s) from the comment stream (the bot will'
' no longer crawl these subreddits but will still make replies if'
' summoned).',
)
user_example = 'user(s) (eg. \'{0}{1}\')'.format(reddit.PREFIX_USER, AUTHOR)
sub_example = 'subreddit(s) (eg. \'{0}{1}\')'.format(
reddit.PREFIX_SUBREDDIT, 'history'
)
note = (
'Note: user profiles are specified as \'{0}u_<username>\''
' (eg. \'{0}u_{1}\')'.format(reddit.PREFIX_SUBREDDIT, AUTHOR)
)
parser.add_argument('--{0}'.format(ADD_BLACKLIST),
metavar='NAME', nargs='+',
help='Blacklist {user} or {sub} so that the bot no longer replies'
' to those user(s) or to comments/posts in those subreddit(s).'
' {note}.'.format(
user=user_example,
sub=sub_example,
note=note,
),
)
parser.add_argument('--{0}'.format(RM_BLACKLIST),
metavar='NAME', nargs='+',
help='Remove {user} or {sub} from the blacklist so that the bot can'
' reply to those user(s) or comments/posts in those subreddit(s).'
' {note}.'.format(
user=user_example,
sub=sub_example,
note=note,
)
)
parser.add_argument('--{0}'.format(ADD_BAD_USERNAME),
metavar='STRING', nargs=3,
help='Adds a string to the bad-usernames database so that it will'
' not be matched in the future as a potential username.'
' Usage: --{0} bad_username submission_fullname score. The first'
' argument should be the string that the bot should ignore in the'
' future. The second argument should be the fullname of the'
' submission containing the username. The third should be'
' the score of the deleted bot comment.'.format(ADD_BAD_USERNAME),
)
parser.add_argument('--{0}'.format(RM_BAD_USERNAME),
metavar='STRING', nargs='+',
help='Removes the string(s) from the bad-usernames database so that'
' they can be matched in the future as potential instagram'
' usernames again.'
)
parser.add_argument('--{0}'.format(DELETE_DATA), action='store_true',
help='Remove all data saved by the program.'
' This will ask for confirmation.',
)
database_choices = DATABASE_CHOICES + ['*']
backup_choices = BACKUP_CHOICES + ['*']
parser.add_argument('--{0}'.format(DUMP),
metavar='NAME', nargs='+', choices=database_choices,
help='Dump the specified database(s) to stdout.'
' Choices: {0}'.format(database_choices),
)
parser.add_argument('--{0}'.format(BACKUP),
metavar='NAME', nargs='+', choices=backup_choices,
help='Backup the specified database(s) to an SQL text format.'
' Backups are stored in \'{0}\'. Choices: {1}.'.format(
Database.BACKUPS_PATH_ROOT, backup_choices,
),
)
parser.add_argument('--{0}'.format(LOAD_BACKUP),
metavar='NAME', nargs='+', choices=backup_choices,
help='Load the specified database(s) from'
' its last --{0} dump. **This will cause any changes since the'
' last --{0} to be lost**'
' (will ask for confirmation). See --{0} for choices.'.format(
BACKUP,
),
)
ig_actual_choices = IG_DB_CHOICES + IG_DB_DISPLAY_CHOICES
parser.add_argument('--{0}'.format(IG_DB),
metavar='NAME', nargs='+',
choices=ig_actual_choices,
help='Dump the specified instagram user databases to stdout.'
' See --{0} for choices.'.format(IG_CHOICES),
)
parser.add_argument('--{0}'.format(IG_DB_LIKES), metavar='NAME',
nargs='+', choices=ig_actual_choices,
help='Dump the specified instagram user databases to stdout'
' sorted by most likes -> least likes.'
' See --{0} for choices.'.format(IG_CHOICES),
)
parser.add_argument('--{0}'.format(IG_DB_COMMENTS), metavar='NAME',
nargs='+', choices=ig_actual_choices,
help='Dump the specified instagram user databases to stdout'
' sorted by most comments -> least comments.'
' See --{0} for choices.'.format(IG_CHOICES),
)
parser.add_argument('--{0}'.format(IG_DB_LINKS_RAW), metavar='NAME',
nargs='+', choices=ig_actual_choices,
help='Dump the specified instagram user databases\' links to'
' stdout. See --{0} for choices.'.format(IG_CHOICES),
)
parser.add_argument('-I', '--{0}'.format(IG_CHOICES),
action='store_true',
help='List valid --{0} choices.'.format(IG_DB),
)
return vars(parser.parse_args())
__all__ = [
'handle',
'parse',
]
|
[
"lv10wizard@gmail.com"
] |
lv10wizard@gmail.com
|
fac37f083d42c910f33527664fdf1b8d30cd9f26
|
132f437f6dea029a05a98cc51c1b33be9531ecdf
|
/env/bin/gunicorn
|
dd175d426dac34ccf8e9689d7bd3f0436ebc6f26
|
[] |
no_license
|
dnlbellfield/official_website
|
15db7ae0e49c8757c051b24c4f4e95afab20ae46
|
7677941c89f91b26453dd54edb611a492ed2b9e4
|
refs/heads/main
| 2023-03-17T11:36:29.221759
| 2021-03-11T23:05:31
| 2021-03-11T23:05:31
| 346,860,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
#!/Users/johnbellfield/official_website/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"dnlbellfield@gmail.com"
] |
dnlbellfield@gmail.com
|
|
d13f09077c30b481d74bd155cde1b173f07758c0
|
295f34f4411d984f0ff6026be6e96fe134dc1550
|
/home/pi/.local/share/Trash/files/lectoraTTL.py
|
3741207e9b221e2106d60bb92b8a05f4e64ecb34
|
[] |
no_license
|
mcashjavier/disco-linux-raspy
|
1e3fed914b6040fa9972e7cfc7357ecb72070e8c
|
8c23103cf089059fbdadfad8cfb7059c1580da83
|
refs/heads/master
| 2022-12-20T17:06:39.967203
| 2019-03-12T12:09:22
| 2019-03-12T20:01:10
| 175,072,541
| 0
| 3
| null | 2022-12-18T06:59:27
| 2019-03-11T19:44:12
| null |
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
# Program for a TTL card reader
# DATA IN on pin 7  --> GPIO4
# CLK IN on pin 13  --> GPIO27
# CARD PRESENT IN on pin 15 --> GPIO22
import tkinter
import RPi.GPIO as IO
import time
IO.setwarnings(True)
IO.setmode(IO.BCM) # uses GPIOxx (BCM) pin numbering
#IO.setmode(IO.BOARD) # uses physical board pin numbering
#IO.setup(4,IO.IN,pull_up_down=IO.PUD_UP)
IO.setup(27,IO.IN,pull_up_down=IO.PUD_UP)
#IO.setup(22,IO.IN,pull_up_down=IO.PUD_UP)
#IO.setup(22,IO.IN)
#IO.setup(27,IO.IN)
Hab_CLK=True
contador=0
x=0
#def inte_DATA(channel):
# print ('canal %s activo GPIO4',channel)
## print ('Marcos Vetta')
# os.system('clear')
#def inte_CLK(channel):
## global Hab_CLK
# global contador
# if channel==27:
# contador=contador+1
# print ('CLK ',channel,IO.input(27),contador)
# #IO.cleanup()
## print('CONT',contador)
# os.system('clear')
# if channel==4:
# print ('DATA %d %d',channel,IO.input(4))
# elif channel==22:
# print('CARD present %d %d',channel,IO.input(22))
def inte_CARDPRESENT(channel):
# global Hab_CLK
# global contador
# if channel==22:
# contador=contador+1
# Hab_CLK=True
    print ('CARD PRESENT:',channel,contador)
# else:
# print('otro pin')
#IO.add_event_detect(4,IO.RISING,interrupcion_flanco_asc)
#IO.add_event_detect(4,IO.BOTH,interrupcion_flanco_asc)
#IO.add_event_detect(4,IO.FALLING,callback=inte_DATA)
#IO.add_event_detect(27,IO.RISING,callback=inte_CLK)
#IO.add_event_callback(27,inte_CLK)
IO.add_event_detect(27,IO.FALLING,callback=inte_CARDPRESENT,bouncetime=1)
#IO.add_event_detect(27,IO.FALLING,inte_CLK,bouncetime=500)
for x in range (0,500):
    #entrada=IO.input(4)
    #print('.')
    #time.sleep(1)
    if IO.input(27)>0.5:
        print('input=',IO.input(27))
    else:
        print('input=',IO.input(27))
# time.sleep(0.5)
|
[
"prog2@magneticash.com"
] |
prog2@magneticash.com
|
4808fcb09c746d4045f3885a0b1760f7e8bdeb06
|
d82995f33a603b61e06deecd644f5a293823b746
|
/fully_convolutional_networks/transposed_convolutional.py
|
48d78190eca9ceba1e6e2adef3e7b2b4eb010ff1
|
[] |
no_license
|
RyanCargan/udacity-robond-term1
|
79d065aab42e4e308a93e36fa869bb48477aebef
|
a92a6e665e72930f9a33eb3a82f64da206b64fcf
|
refs/heads/master
| 2021-09-07T08:39:07.181342
| 2018-02-20T12:27:29
| 2018-02-20T12:27:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
import tensorflow as tf
import numpy as np
from one_by_one import custom_init
def upsample(x):
"""
Apply a 2 times upsample on x and return the result
x - the input feature
- 4 rank tensor
return: TF operation
"""
shape = x.get_shape()
input_shape = shape[1]
kernel_size = (input_shape * 2,input_shape * 2)
stride = 2
return tf.contrib.layers.conv2d_transpose(inputs=x,
num_outputs=shape[3],
kernel_size=kernel_size,
stride=stride,
padding='SAME')
if __name__ == '__main__':
x = tf.constant(np.random.randn(1, 4, 4, 3), dtype=tf.float32)
conv = upsample(x)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
result = sess.run(conv)
print('Input Shape: {}'.format(x.get_shape()))
print('Output Shape: {}'.format(result.shape))
|
[
"j.tabacii@gmail.com"
] |
j.tabacii@gmail.com
|
d558ea33f4ad605890eb70d3641f85bb38c410a8
|
71b934d407ce48148ae68370f5f5844b1e3a418a
|
/src/asker.py
|
4d1d1ccab26b5d60299de35466b2b2d023dc888e
|
[
"MIT"
] |
permissive
|
davidteather/zoom-scheduler
|
6734f8700716c8c726e9d7d85119d6d9176c3855
|
7f575979b508f50af65458ccb4ea73d842ea657e
|
refs/heads/main
| 2023-02-19T20:11:07.954339
| 2021-01-23T09:57:18
| 2021-01-23T09:57:18
| 330,213,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
import dateparser
import json
import os
if not os.path.isdir('data'):
os.mkdir('data')
data_path = "data/meetings.json"
images_path = "images/"
def ask():
name = input("What is the name of your class? (e.g. CS 101): ")
time = input("What time is your class? (e.g. 14:30): ")
hour = time.split(":")[0]
minute = time.split(":")[1]
print("\nDay options: Sun, Mon, Tue, Wed, Thu, Fri, Sat")
days = input("What days are your classes? (e.g. Mon,Wed,Fri): ")
room_id = input("Zoom ID: (e.g. 01234567890): ")
password = input("Zoom Password: (e.g. 123456): ")
end = input("When do these meetings end? (e.g. 5/15/2021): ")
end_epoch = dateparser.parse(
end, settings={"PREFER_DATES_FROM": "future"}).timestamp()
new_meeting = {
"class_name": name,
"crontab": f"{minute} {hour} * * {days}",
"room_id": room_id,
"password": password,
"end_date": end_epoch
}
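    # e.g. a class at 14:30 on "Mon,Wed,Fri" produces the crontab field
    # "30 14 * * Mon,Wed,Fri"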
try:
with open(data_path, "r") as i:
current_data = json.loads(i.read())['meetings']
current_data.append(new_meeting)
except (FileNotFoundError, ValueError):
current_data = [new_meeting]
with open(data_path, 'w+', encoding='utf-8') as f:
json.dump({'meetings': current_data}, f, ensure_ascii=False, indent=4)
inp = 'y'
while inp.lower() != 'n':
ask()
inp = input("Add more classes y/n: ")
|
[
"noreply@github.com"
] |
davidteather.noreply@github.com
|
f282d1785b0e2341e87ca9678267cf02af90232c
|
ebfcad44f8314f9efa758431eee180cbbddc8d53
|
/EplatServer/old/test.py
|
9879d5b7ff11f98037036a773bc08560b3bb0688
|
[] |
no_license
|
rudi-code/e-plat
|
7baa22e72f9e7cb7e2a0d0ad2fc3dd380d1af97f
|
ecb0ec9eb82e7d9b974d7fe7a0c2e463e9c8b8dd
|
refs/heads/master
| 2023-06-19T20:40:29.846446
| 2021-07-22T12:21:02
| 2021-07-22T12:21:02
| 388,450,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,368
|
py
|
import json
import math
def calculate_distance(lat1, lon1, lat2, lon2):
earth_radius = 6378.137 # Radius of earth in KM
dLat = lat2 * math.pi / 180 - lat1 * math.pi / 180
dLon = lon2 * math.pi / 180 - lon1 * math.pi / 180
a = math.sin(dLat/2) * math.sin(dLat/2) + \
math.cos(lat1 * math.pi / 180) * math.cos(lat2 * math.pi / 180) * \
math.sin(dLon/2) * math.sin(dLon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = earth_radius * c
return d * 1000 # meters
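# Sanity check: one degree of latitude is roughly 111.32 km, so
# calculate_distance(0.0, 0.0, 1.0, 0.0) is about 111319 meters.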
def calculate_angle(lat1, lon1, lat2, lon2):
dLon = lon2 * math.pi / 180 - lon1 * math.pi / 180
y = math.sin(dLon) * math.cos(lat2 * math.pi / 180)
x = math.cos(lat1 * math.pi / 180) * math.sin(lat2 * math.pi / 180) \
- math.sin(lat1 * math.pi / 180) * math.cos(lat2 * math.pi / 180) * math.cos(dLon)
brng = math.atan2(y, x)
brng = math.degrees(brng)
brng = (brng + 360) % 360
return brng
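# Sanity check: a point due north gives a bearing of 0 degrees,
# e.g. calculate_angle(0.0, 0.0, 1.0, 0.0) == 0.0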
def convert_angle(degr):
direction = -1
    if degr > 315 or degr < 45:
        # north (the sector wraps around 0 degrees, hence "or")
        direction = 0
elif degr > 45 and degr < 135:
# east
direction = 1
elif degr > 135 and degr < 225:
# south
direction = 2
else:
# west
direction = 3
return direction
if __name__ == '__main__':
junction = [-7.762049, 110.369364]
request = '{"device_id":"12345","direction":"north","lat":-7.762151897747425,"lon":110.37007759357256}'
# request = '{"device_id":"12345","direction":"north","lat":-7.762151897746425,"lon":110.37007759357256}'
# read previous data
with open('data.json') as data_file:
data_json = json.load(data_file)
# get the json
ambulance = json.loads(request)
    data = data_json
    # distance from the previous update (-1 if none); the stored value is
    # overwritten below, so keep it for the approaching/leaving check
    prev_distance = data['ambulance']['distance']
    if ambulance['lat'] == 0 and ambulance['lon'] == 0:
        # exit signal, emitted when the ambulance is leaving the traffic light
        direction = 0
        distance = -1
        prev_distance = -1 # forces the all-red branch below
        data['ambulance']['direction'] = 0
        data['ambulance']['angle'] = 0
        data['ambulance']['distance'] = -1
else:
# convert from angle to direction and set appropriate action
# for corresponding traffic light
angle = calculate_angle(junction[0], junction[1],\
ambulance['lat'], ambulance['lon'])
# calculate distance
distance = calculate_distance(junction[0], junction[1],\
ambulance['lat'], ambulance['lon'])
direction = convert_angle(angle)
data['ambulance']['direction'] = direction
data['ambulance']['angle'] = angle
data['ambulance']['distance'] = distance
print direction
print angle
    # check the delta_distance to determine whether the ambulance is
    # approaching or leaving the traffic light
    if prev_distance != -1 and distance <= prev_distance:
        # approaching; set the appropriate traffic light to green
        data['traffic_light'][direction] = 1
    elif prev_distance != -1 and distance > prev_distance:
        # leaving; set all traffic lights to red
        data['traffic_light'] = [0, 0, 0, 0]
    else:
        # exit signal (or first update); set all traffic lights to red
        data['traffic_light'] = [0, 0, 0, 0]
# write to file
data['ambulance']['info'] = ambulance
with open('data.json', 'w') as data_file:
data_file.write(json.dumps(data))
|
[
"rudi.purnomo@quadras.co.id"
] |
rudi.purnomo@quadras.co.id
|
47ca79ad241ea2f2be47ad3ac495a37ae51d61fc
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/TmasgxCm6iz3gTGHk_24.py
|
36dac173b508a5b700464e9c37d5bdb750607cd4
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
"""
Write a function that returns the **length of the shortest contiguous
sublist** whose sum of all elements **strictly exceeds** `n`.
### Examples
min_length([5, 8, 2, -1, 3, 4], 9) ➞ 2
min_length([3, -1, 4, -2, -7, 2], 4) ➞ 3
# Shortest sublist whose sum exceeds 4 is: [3, -1, 4]
min_length([1, 0, 0, 0, 1], 1) ➞ 5
min_length([0, 1, 1, 0], 2) ➞ -1
### Notes
* The sublist should be composed of **contiguous elements** from the original list.
* If no such sublist exists, return `-1`.
"""
def min_length(lst, n):
s = sum(lst)
if lst:
x = min_length(lst[1:], n)
y = min_length(lst[:-1], n)
return min((k for k in (x, y) if k >= 0), default = len(lst) if s > n else -1)
return -1
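# Quick check against the docstring examples:
#   min_length([5, 8, 2, -1, 3, 4], 9) == 2
#   min_length([0, 1, 1, 0], 2) == -1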
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
b7d757571c521ec0b025a9ed314c2e638c8e5170
|
ffeae925479dfa94d0ec710fcd19061ac3a6e98a
|
/Fibonacci_recursive.py
|
ad067db15f7af75be20738034514ae661d2f50b1
|
[
"MIT"
] |
permissive
|
NageshJ2014/TestRepor
|
7c346d162cd5bfcaf178c3b84d37083a5a45fee1
|
bea3c5feca4fda196f43797d12bf752d7c97959a
|
refs/heads/master
| 2020-04-02T00:42:05.923675
| 2018-10-19T17:28:45
| 2018-10-19T17:28:45
| 153,817,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
def fibonaaci(pos):
    if pos == 1:
        #print('0')
        return 0
    if pos == 2:
        #print('1')
        return 1
    n1 = fibonaaci(pos - 1)
    n2 = fibonaaci(pos - 2)
    print (n1)
    return n1 + n2
n5 = fibonaaci(5)
print(n5)
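# fibonaaci(5) == 3 (the sequence here starts 0, 1, 1, 2, 3)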
name = 'Andy' # define name
def printBob():
global name;
print('printing from the def: ', name) # print from function
name = 'Bob' # define name in function
print('printing from the def: ', name) # print from function
# the main function
print('printing from the main:{} Before Global Change '.format(name)) # print from the main
printBob() # call the function to print
print('printing from the main: ', name) # print from the main
num = input('Enter a number : ')
print('The zero-padded number is : ', str(num).rjust(10,'0'))
|
[
"nagesh.jayaram@gmail.com"
] |
nagesh.jayaram@gmail.com
|
4cf5c113f26e735a332c73a5524e7f0e21574ed9
|
e140817f5831fdf7cf93111455a12788794da8eb
|
/testFiles/outdatedFiles/ZoneCollection.py
|
de4a428260c0d30b144b4ec282a45ee427f6680e
|
[] |
no_license
|
Davidchristy/TVAC
|
0ef40fbf5ecb6ab369f69db9ef281c9b09ecd7e6
|
ab111fa67e7e1cecb4543887f5eaea28edcbc29e
|
refs/heads/master
| 2021-09-09T14:14:05.342538
| 2018-03-16T21:57:56
| 2018-03-16T21:57:56
| 116,292,748
| 0
| 0
| null | 2018-02-02T04:09:16
| 2018-01-04T18:18:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
import uuid
from Logging.Logging import Logging
class ZoneCollection:
def __init__(self, parent):
Logging.debugPrint(2,"Creating ZoneCollection")
# self.zone_dict = build_zone_collection()
self.profile_uuid = uuid.uuid4()
self.parent = parent
# self.update_period = 10
# self.profile_name = None
# self.thermal_start_time = None
# self.expected_time_values = None
# self.set_points_start_time = None
def getZone(self,d):
return self.zone_dict[d]
def get_zones_data_json(self):
'''
This returns a json string of the currently loaded profile
'''
return ('{"profileuuid":"%s","updateperiod":%s,"profile":[ %s ]}' % (self.profile_uuid,self.parent.update_period,self.fill_zones()))
def fill_zones(self):
        '''
        This is a helper function for get_zones_data_json()
        '''
message = []
zone_len = len(self.zone_dict)
count = 0
for zone in self.zone_dict:
message.append(self.zone_dict[zone].getJson())
if count < (zone_len - 1):
message.append(',')
count = count + 1
return ''.join(message)
|
[
"DavidChristy520@gmail.com"
] |
DavidChristy520@gmail.com
|
24a3c675c28fa9825a10c00d02ac5b8e5e160fe3
|
337a67e82c446a27bd96770d6b02e889a426eb8d
|
/03-Python/1/Activities/12-Stu_NumberChain/Solved/NumberChain_Solved.py
|
0f299c03f8ad92022d5b6a63c1516139f2ee21bd
|
[] |
no_license
|
ktatineni/GTechCode
|
6a317a3020c91ef259aba8c647341c6b6f59396e
|
01bcfca38f4f403825ff54ff1497ac78329d120e
|
refs/heads/master
| 2020-05-01T13:44:06.320959
| 2019-05-14T16:31:10
| 2019-05-14T16:31:10
| 177,499,930
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# Initial variable to track game play
user_play = "y"
# While we are still playing...
while user_play == "y":
# Ask the user how many numbers to loop through
user_number = input("How many numbers? ")
# Loop through the numbers. (Be sure to cast the string into an integer.)
for x in range(int(user_number)):
# Print each number in the range
print(x)
# Once complete...
user_play = input("Continue: (y)es or (n)o? ")
|
[
"krishna@krishnas-mbp.attlocal.net"
] |
krishna@krishnas-mbp.attlocal.net
|
43f0bad355eb6dd19692fb8dfa7f34dd1d19199a
|
7a14fa5256977a08c033aea484dace528a8e6ae2
|
/formacao_primeit/exercises/swallows.py
|
56dfa7d20d9ed4486ce54119c73f00f490b29816
|
[] |
no_license
|
vinicius-vph/python
|
cdf8496d12940d0ce2399b25d94da2aac0e4983b
|
503712c83cf3f712a825127620974125945e2601
|
refs/heads/main
| 2023-05-16T15:26:53.440422
| 2023-05-09T19:42:54
| 2023-05-09T19:42:54
| 340,350,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# Vinicius Santos, Python course, Session 1
"""
An arbitrary comment
spanning several lines
"""
print("Hello, world!")
print("Hello, world!")
print()
print("Suppose two swallows \"carry\" it together.")
print('African or "European" swallows?')
|
[
"vinicius.vph@gmail.com"
] |
vinicius.vph@gmail.com
|
226ea7fa9126703aafbe9372476da27dfecbdb89
|
7f54637e347e5773dfbfded7b46b58b50544cfe5
|
/step6/qdoba/qdoba/middlewares.py
|
a506e086c2996cefbf6a582ca4db11cd012ac34d
|
[] |
no_license
|
simba999/all-scrapy
|
5cc26fd92b1d03366b74d4fff58c4a0641c85609
|
d48aeb3c00fa2474153fbc8d131cf58402976e1d
|
refs/heads/master
| 2021-01-25T14:24:04.715550
| 2018-03-03T13:43:13
| 2018-03-03T13:43:13
| 123,695,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,879
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class QdobaSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"oliverking8985@yahoo.com"
] |
oliverking8985@yahoo.com
|
b0e3e29b8c8d3347a402574bd4137ff42303a1f4
|
23a5c5ccb7bedbfd9929c611a358aba0caf1ea1c
|
/CFRplus.py
|
e081dc1af3c2c49e9ee76c590ef1f53c661e99cd
|
[] |
no_license
|
snarb/RPSnn
|
058db0483aa674f101242c0436b57686952e19ac
|
60fa010a870fb1edb7de095469f1585b3868ad5e
|
refs/heads/master
| 2021-01-18T18:01:48.233984
| 2017-06-12T14:01:48
| 2017-06-12T14:01:48
| 86,837,612
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,203
|
py
|
from KuhnPoker import *
from treelib import Node, Tree
from CfrNode import CfrNode
from GameTree import GameTree
from matplotlib import pyplot as plt
import Utils
import math
from collections import Counter
from math import sqrt
import random
import time
import pandas as pd
from NodeEstimator import Estimator
class CFRtrainer:
def __init__(self):
self.playerOneTree = GameTree(CfrNode)
self.playerTwoTree = GameTree(CfrNode)
self.kuhn = KuhnPoker()
self.stats = Counter()
#self.alpha = alpha
self.trainigXdata = []
self.trainigYdata = []
self.hists = []
self.avgStr = []
self.stratSum = 0
self.iter = 0
self.betRegrets = []
def CFR(self, p0, p1):
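        # Vanilla counterfactual regret minimization: recursively evaluate
        # each action, mix by the current strategy to get the node utility,
        # and accumulate regrets weighted by the opponent's reach probability.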
curPlayer = self.kuhn.GetCurrentPlayer()
if(self.kuhn.IsTerminateState()):
return self.kuhn.GetPayoff(curPlayer)
curPlayerProb = p0 if curPlayer == Players.one else p1
opProb = p1 if curPlayer == Players.one else p0
tree = self.playerOneTree if curPlayer == Players.one else self.playerTwoTree
cfrNode = tree.GetOrCreateDataNode(self.kuhn, curPlayer)
strategy = cfrNode.GetStrategy(curPlayerProb)
util = [0.0] * NUM_ACTIONS
nodeUtil = 0
infosetStr = self.kuhn.GetInfoset(curPlayer)
infosetBackup = self.kuhn.SaveInfoSet()
for action in range(NUM_ACTIONS):
self.kuhn.MakeAction(action)
if(curPlayer == Players.one):
util[action] += -self.CFR(p0 * strategy[action], p1)
else:
util[action] += -self.CFR(p0, p1 * strategy[action])
nodeUtil += strategy[action] * util[action]
self.kuhn.RestoreInfoSet(infosetBackup)
for action in range(NUM_ACTIONS):
regret = util[action] - nodeUtil
cfrNode.regretSum[action] = cfrNode.regretSum[action] + opProb * regret
if(('1 | uplayed;uplayed;uplayed' in infosetStr) and curPlayer == Players.one):
self.trainigXdata.append(np.array(strategy))
self.trainigYdata.append(nodeUtil)
self.betRegrets.append(cfrNode.regretSum[1])
self.stratSum += strategy[1]
self.avgStr.append(self.stratSum / (len(self.avgStr) + 1))
self.iter += 1
return nodeUtil
def running_mean(self, x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / N
def Train(self):
        util = 0
        start_time = time.time()
        results = []
        # utils = []
        for i in range(1, 500):
            self.kuhn.NewRound()
            curUtil = self.CFR(1, 1)
            util += curUtil
            if(i % 80 == 0):
                results.append(util / i)
def CheckNash(self):
if (self.kuhn.IsPlayerOneCloseToNash(self.playerOneTree)):
print("Player one is in Nash")
else:
print("Player one is not in Nash")
if(self.kuhn.IsPlayerTwoCloseToNash(self.playerTwoTree)):
print("Player two is in Nash")
else:
print("Player two is not in Nash")
|
[
"thesnarb@gmail.com"
] |
thesnarb@gmail.com
|
c5a8fe695a4c1a3e451a369a01d1885998462d0d
|
c72266d064fe2b52f9389e9352e01bb369d7ee52
|
/keyword_extraction/extra_analysis/word.py
|
694d9741c234169c35719c08c5d6378ef7045175
|
[
"MIT"
] |
permissive
|
luckypython777/stackoverflow-search
|
5f0e5f8bea4b778f46e4dd4ac0bc20196f9ed1e3
|
990b339a1b87530894d1068eb99d0b03e6476268
|
refs/heads/master
| 2021-12-09T13:11:35.448758
| 2016-05-17T06:33:41
| 2016-05-17T06:33:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
#########################################
# Plotting word clouds for common #
# words in the queries of each #
# language being analysed #
#########################################
from os import path
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import wordcloud
from wordcloud import WordCloud, ImageColorGenerator
# d = path.dirname(__file__)
# Read the whole text.
text = open('fullfilePython.txt').read()
# read the mask / color image
# taken from http://jirkavinse.deviantart.com/art/quot-Real-Life-quot-Alice-282261010
alice_coloring = np.array(Image.open( "pythonlogo.png"))
wc = WordCloud(background_color="black", max_words=200, mask=alice_coloring,
max_font_size=80, random_state=42)
# generate word cloud
wc.generate(text)
# create coloring from image
image_colors = ImageColorGenerator(alice_coloring)
# show
plt.imshow(wc)
plt.axis("off")
plt.figure()
# recolor wordcloud and show
# we could also give color_func=image_colors directly in the constructor
plt.imshow(wc.recolor(color_func=image_colors))
plt.axis("off")
plt.figure()
plt.imshow(alice_coloring, cmap=plt.cm.gray)
plt.axis("off")
plt.show()
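
# To keep the recolored cloud as an image file instead of only displaying it,
# WordCloud also exposes to_file():
# wc.to_file("python_wordcloud.png")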
|
[
"hagarwa3@illinois.edu"
] |
hagarwa3@illinois.edu
|
1f63fe94a4ed3323b05298eb9685d791fe4f3e89
|
17052f5fa0d465e6a0f6f194cbab9ac66adb7b48
|
/services/migrations/0015_auto_20210314_1558.py
|
7a8222db2f2c72ed74c70830b663299925c50c7c
|
[] |
no_license
|
Code-Institute-Submissions/Solid-Properties-Project
|
7e7513b59acf4df85fe00634b3d504e7c04a5ffa
|
e573dcdd71092c90e35b1cf86e912d88200aa661
|
refs/heads/master
| 2023-04-02T16:21:20.437436
| 2021-04-05T10:25:05
| 2021-04-05T10:25:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# Generated by Django 3.1.5 on 2021-03-14 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0014_auto_20210314_1556'),
]
operations = [
migrations.AddField(
model_name='category',
name='icon',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=254),
),
]
|
[
"astig.mandalian2014@gmail.com"
] |
astig.mandalian2014@gmail.com
|
0d159cde821bf873eb32118c556e7c536b923354
|
f7ed06524bfd49b8a3840f477696acea924e9728
|
/django_project/users/views.py
|
90741090346cf0debd4149e0d0c6f3256e754c74
|
[] |
no_license
|
alifarukyucel/django-web-app
|
7362c126c67bc8434ca4c0180cbe27bcb835a27b
|
01bc26d189e21e20206f8fc0ecede13cc58b8348
|
refs/heads/main
| 2023-07-09T12:41:28.808039
| 2021-08-18T15:39:25
| 2021-08-18T15:39:25
| 357,924,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Account created for {username}! You are now able to log in')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, 'Your account has been updated!')
return redirect('profile') # redirect due to POST, GET, REDIRECT pattern.
# This way, the user doesn't get 'Are you sure you want to resubmit the form?' question. That question comes
# up when we make another POST request. By redirecting, we cause the browser to send a GET request after a
# reload.
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'users/profile.html', context)
|
[
"alifarukyucel@gmail.com"
] |
alifarukyucel@gmail.com
|
e9dfda36ce273fddb9ce89b2447f01d11b529d54
|
a15046d4bfd07025ed271a22820cdc43fb624cdc
|
/复读机.py
|
17964c542ef5d795f1f4a0211bc6fca29b788552
|
[] |
no_license
|
vashti0622/WeChat-Auto-Reply
|
3d5909a7085eeb37c8f993bdce1e8399fa666676
|
fbea0c40a43c361411edc882d55aa9e85958aa24
|
refs/heads/master
| 2020-06-02T18:57:17.940154
| 2019-06-12T00:19:12
| 2019-06-12T00:19:12
| 191,274,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
import itchat
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
reply_text = msg['Text']
if msg['Text'] == '?':
reply_text = '自动回复:欢迎来到复读机模式。'
return reply_text
itchat.auto_login(hotReload=True)  # hotReload avoids re-scanning the QR code on every run
itchat.run()
|
[
"noreply@github.com"
] |
vashti0622.noreply@github.com
|
8c665b7bb67da23417c012a0d814fee1582c6ba0
|
87f1f50002ac8d3835a578d46c12640bc5bd3d75
|
/6kyu/Replace Alphabet Position.py
|
108d0fe4c6aac63d0b5c0887e3d1d12eb861d251
|
[] |
no_license
|
cyrustsiu/Codewars-Solution
|
141088cb2ad735182e90f65b635bd2ff390d1a40
|
3cab594488735df311fceea7aad768c0b411cefb
|
refs/heads/master
| 2022-12-24T09:54:17.379176
| 2020-10-04T13:44:34
| 2020-10-04T13:44:34
| 288,074,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
def alphabet_position(text):
replace = {'a':1, 'b':2, 'c':3, 'd':4, 'e':5, 'f':6, 'g':7, 'h':8, 'i':9, 'j':10, 'k':11, 'l':12, 'm':13, 'n':14, 'o':15, 'p':16, 'q':17, 'r': 18, 's':19, 't':20, 'u':21, 'v':22, 'w':23, 'x':24, 'y':25, 'z':26}
result = []
for word in text.lower():
if word in replace:
result.append(replace[word])
return " ".join(map(str,result))
|
[
"noreply@github.com"
] |
cyrustsiu.noreply@github.com
|
2d7d14890a3799002720b8b25e8135482c83b170
|
b7888fb921abeb2ad44ce6409bf62ecef77a458e
|
/src/djanban/apps/members/migrations/0023_auto_20170519_1715.py
|
51f41a01f80784c5540f2db54942c0121d1f421e
|
[
"MIT"
] |
permissive
|
my-favorite-repositories/djanban
|
303ce59f821d01f727536068b83f8e8485b7d649
|
6451688d49cf235d03c604b19a6a8480b33eed87
|
refs/heads/master
| 2021-03-01T14:23:19.745085
| 2018-05-15T17:12:01
| 2018-05-15T17:12:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-19 15:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('members', '0022_member_custom_avatar'),
]
operations = [
migrations.AlterField(
model_name='member',
name='custom_avatar',
field=models.ImageField(blank=True, default=None, null=True, upload_to=b'', verbose_name='Custom avatar'),
),
]
|
[
"diegojromerolopez@gmail.com"
] |
diegojromerolopez@gmail.com
|
3d1151563451aabd27889d52b7c622cb5e6e639c
|
08e160550216d40025c37f6617b2c5b87350ee0c
|
/django_rest_api/django_rest_api/settings.py
|
855c75182761580ddcaa1e097989c1bad717f0a9
|
[] |
no_license
|
Poornima7/Django_RESt_API
|
92b7c946a8afcc7304be71f518a24a86aae2588a
|
6197372a0307b23d19bc0f7f73df29c6489867ac
|
refs/heads/master
| 2020-04-15T04:26:34.194050
| 2019-01-07T08:02:22
| 2019-01-07T08:02:22
| 164,383,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,146
|
py
|
"""
Django settings for django_rest_api project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@&4+6v#k4uckq5_k3dda2wxn_5_jgmmdsi8i(8(t1w(4k*nm5p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'my_app.apps.MyAppConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_rest_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_rest_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"poornima.slv@outlook.com"
] |
poornima.slv@outlook.com
|
a40adac7fa8a2c68d1dce489b4ffce81f7b70679
|
f885d8c09b9069817f1a25d0a11e889dbafdc4b4
|
/a1/test_res/scrap.py
|
998c748949cd3306a0315d40f8d49d18de2a726e
|
[] |
no_license
|
icecreambean/COMP9517
|
39dcd0578e8b17bd957bdb79e40d1644395f18e9
|
9cae8ae147c55e8f714c8af12f22f2cb6e4e54d9
|
refs/heads/master
| 2022-08-21T14:50:23.155144
| 2020-06-02T01:32:27
| 2020-06-02T01:32:27
| 243,438,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,266
|
py
|
border = int(max(g.shape)/4)
g_new = cv2.copyMakeBorder(g, border, border, border, border, cv2.BORDER_CONSTANT)
res = cv2.matchTemplate(r,g_new,cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
r_top_left = max_loc
r_bottom_right = (r_top_left[0] + r.shape[1], r_top_left[1] + r.shape[0])
res = cv2.matchTemplate(b,g_new,cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
b_top_left = max_loc
b_bottom_right = (b_top_left[0] + b.shape[1], b_top_left[1] + b.shape[0])
r_new = np.zeros(g_new.shape)
print(r_new.shape, r_top_left, r.shape)
h,w = r.shape
rstart, cstart = r_top_left
rstop = rstart + w
cstop = cstart + h
print(r_new[rstart:rstop,cstart:cstop].shape)
print(rstart, cstart, rstop, cstop)
r_new[cstart:cstop,rstart:rstop] = r
cv2.rectangle(r_new,r_top_left, r_bottom_right, 255, 2)
#bottom_right = (top_left[0] + w, top_left[1] + h)
#cv2.rectangle(b,top_left, bottom_right, 255, 2)
#https://www.pyimagesearch.com/2014/09/15/python-compare-two-images/
#color_img = cv2.merge((b,g,r))
#plt.imshow(color_img)
plt.subplot(1,2,1)
plt.imshow(r_new)
plt.subplot(1,2,2)
plt.imshow(g_new)
r_top_left, b_top_left
bn,gn,rn = alignComponents(b,g,r)
plt.subplot(3,1,1)
plt.imshow(bn)
plt.subplot(3,1,2)
plt.imshow(gn)
plt.subplot(3,1,3)
plt.imshow(rn)
bn.shape,gn.shape,rn.shape
plt.subplot(3,1,1)
plt.imshow(b)
plt.subplot(3,1,2)
plt.imshow(g)
plt.subplot(3,1,3)
plt.imshow(r)
ptype = type(g[0,0])
img_plane = np.array([np.iinfo(ptype).max] * template.size)
img_plane = np.reshape(img_plane, template.shape)
# comparison logic
#def _matchTemplate(mt_img, mt_template, cv_method):
# mt_result = cv2.matchTemplate(mt_img, mt_template, cv_method)
# _, _, mt_locmin, mt_locmax = cv2.minMaxLoc(mt_result)
# if cv_method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
# mt_start = mt_locmin
# else:
# mt_start = mt_locmax
# mt_stop = (mt_start[0]+width(mt_img), mt_start[1]+height(mt_img))
# return mt_start, mt_stop # (top left, bot right) each in (r,c) format
# perform image alignment
def alignComponents(b,g,r):
# assume g == template (most accurately sliced)
border = int(max(g.shape)/4)
g_new = cv2.copyMakeBorder(g, border, border, border, border, cv2.BORDER_CONSTANT)
pix_type = type(g[0,0])
# compare: r,b <--> g (template)
r_start, _ = _matchTemplate(r, g_new)
b_start, _ = _matchTemplate(b, g_new)
# apply translation (to larger img plane)
r_new = np.zeros(g_new.shape, dtype=pix_type)
b_new = np.zeros(g_new.shape, dtype=pix_type)
print(r_start, b_start)
    # cv2.minMaxLoc returns locations as (x, y), so rows index [1] and columns index [0]
    r_new[ r_start[1]:r_start[1]+height(r), r_start[0]:r_start[0]+width(r) ] = r
    b_new[ b_start[1]:b_start[1]+height(b), b_start[0]:b_start[0]+width(b) ] = b
img_out = cv2.merge((b_new,g_new,r_new))
return img_out
# mt: off(54,54): imgsize=(106, 167) (maxsize=(160, 221)); res=1290.5042508521697
def downsize(img):
r,c = img.shape
nr = r/2
nc = c/2
if nr < 1 or nc < 1: # can't scale down any further
return None
    return cv2.resize(img, ( int(c/2), int(r/2) ))  # cv2.resize takes dsize as (width, height)
|
[
"vict.tse.business@gmail.com"
] |
vict.tse.business@gmail.com
|
d5885eb6a99f09e649a1083ed122485adadb790c
|
6a41917cb0b0dd77d37bb040f43f758a28c9520a
|
/asign/settings.py
|
914cf8f678141aedc2a8ce6574d404c13144ff3e
|
[] |
no_license
|
akulacharan/asign
|
afe53b78197b6063c224853cad2ca67d6e98c584
|
c22d782d8accb8c5d5230f29939e4bdf7a5f1b61
|
refs/heads/master
| 2022-11-21T21:30:42.552848
| 2020-07-29T08:29:09
| 2020-07-29T08:29:09
| 282,475,318
| 0
| 0
| null | 2020-07-25T15:51:28
| 2020-07-25T15:50:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,403
|
py
|
"""
Django settings for asign project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import django_heroku
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#e$#bug(*s%n8$7)dxfwu9=p6kg^t*3j%12s_&kt9%x)_iyps4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_app',
'crispy_forms',
]
CRISPY_TEMPLATE_PACK ='bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'asign.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTH_USER_MODEL = 'my_app.User'
WSGI_APPLICATION = 'asign.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'profile',
'USER': 'postgres',
'PASSWORD':'Cherry@143',
'HOST':'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
# Activate Django-Heroku.
django_heroku.settings(locals())
|
[
"akulacharanteja@gmail.com"
] |
akulacharanteja@gmail.com
|
c039d48f55f8ecde519357b38ca442c0c18abd2c
|
0a68afb453c069bf16c6931a158d2f43a717b429
|
/nbc6.py
|
4406e50f30f75d140a509ec3fc278151d31f35e4
|
[] |
no_license
|
neriki/newbiecontest
|
7b53e05632aa9cc8f1358d82548c4a18de992244
|
d80a020a1f52c3b0ee3c81b9ea554e7e785b0d55
|
refs/heads/master
| 2021-01-10T07:01:28.198592
| 2015-10-19T16:22:15
| 2015-10-19T16:22:15
| 44,391,505
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
import urllib.request
from http import cookiejar
from urllib.parse import urlencode
import datetime
import math
jar = cookiejar.CookieJar()
credentials = {'user': 'neriki', 'passwrd': 'abcd1234!', 'cookielength' : '-1'}
credenc = urlencode(credentials).encode('ascii')
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
urllib.request.install_opener(opener)
req = opener.open('https://www.newbiecontest.org/forums/index.php?action=login2', credenc)
reqq = opener.open('https://www.newbiecontest.org/epreuves/prog/prog6.php')
q=str(reqq.read())
q=q.split(':')[1].replace('<br />', '')
print(q)
a=int(q.split('x')[0].replace(' ', ''))
b=int(q.split('x')[3].replace('b2','').replace(' ', ''))
q=q.split('x')[4].split('=')
if q[0] !=' ':
c=int(q[0].replace(' ', ''))
else:
c=0
y=int(q[1].replace('"','').replace(' ', ''))
print(a)
print(b)
print(c)
print(y)
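# Roots of a*x^2 + b*x + c = 0 via the quadratic formula,
# x = (-b ± sqrt(b^2 - 4*a*c)) / (2*a); the larger root is submitted below.
# (Note: y is parsed above but not used in the computation.)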
x1 = (-b + math.sqrt((b*b)- (4 *a *c))) / (2*a)
x2 = (-b - math.sqrt((b*b)- (4 *a *c))) / (2*a)
if x1>x2:
x=x1
else:
x=x2
print(x)
print(str(x)[:len(str(x).split('.')[0])+3])
test2 = opener.open('https://www.newbiecontest.org/epreuves/prog/verifpr6.php?solution='+ str("%.2f" % x))
print(test2.read())
|
[
"neriki@free.fr"
] |
neriki@free.fr
|
9080eb145a61a76607a018edafa5a61503c3baa2
|
15826956f215966a3a4a628d2ee4dd43f2bb2820
|
/mywebsite/videorequest/views.py
|
be34303b8398af594f98ecf3a08f3126fc350f54
|
[] |
no_license
|
tusharcastic/django-deployment-example
|
3be0bc41775848d8fe60111be70c6996561cf9eb
|
ca355986eb7e598b2059547baae983b6b40bdc7c
|
refs/heads/master
| 2021-10-12T03:11:21.699756
| 2019-02-01T05:10:08
| 2019-02-01T05:10:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from django.shortcuts import render,redirect
from . models import Video
# Create your views here.
from .forms import VideoForm
def index(request):
videos=Video.objects.order_by('date_added')
context={'videos':videos}
return render(request,'videorequest/index.html',context)
def vrform(request):
if request.method =='POST':
form=VideoForm(request.POST)
if form.is_valid():
new_req=Video(videotitle=request.POST['videoname'],videodesc=request.POST['videodesc'])
new_req.save()
return redirect('index')
else:
form=VideoForm()
context={'form': form}
return render(request,'videorequest/vrform.html',context)
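
# A minimal sketch of the VideoForm imported above (field names follow the
# request.POST keys used in the view; treat them as assumptions):
#
# from django import forms
#
# class VideoForm(forms.Form):
#     videoname = forms.CharField(max_length=200)
#     videodesc = forms.CharField(widget=forms.Textarea)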
|
[
"tush.bhoge@gmail.com"
] |
tush.bhoge@gmail.com
|
6716a5f542a8636259abde410ce1729c4fff1f86
|
1e019ee356416b73c4232351e250a90f2eefa8e7
|
/snippets/sqlite.py
|
59c6aed70a4cacf67d0494d56238764373d26f7f
|
[
"MIT"
] |
permissive
|
Teamworksapp/pydbvolve
|
cb131a66c72f42097f604726989463c6131afbd5
|
e18bb8cf98997c52de0cf30a8e106c480ba0bb57
|
refs/heads/master
| 2021-08-26T00:12:23.924347
| 2017-08-11T14:18:00
| 2017-08-11T14:18:00
| 87,454,422
| 5
| 2
|
MIT
| 2021-08-21T03:51:47
| 2017-04-06T17:03:20
|
Python
|
UTF-8
|
Python
| false
| false
| 967
|
py
|
import sqlite3
# Simpler is better, in this case. The main script works with dict types.
def dict_factory(cur, row):
return {col[0]: row[ix] for ix, col in enumerate(cur.description)}
# End dict_factory
# This is a subclass of sqlite3.Cursor that includes rudimentary __enter__, __exit__
# methods so it can be used in with context manager statements
class CMCursor(sqlite3.Cursor):
def __enter__(self):
return self
def __exit__(self, e_type, e_value, e_tb):
self.close()
# End class CMCursor
# This is a subclass of the sqlite3.Connection class. It does nothing but force
# dict_factory as the row factory and the CMCursor as the cursor factory
class CMConnection(sqlite3.Connection):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.row_factory = dict_factory
def cursor(self, *args, **kwargs):
return super().cursor(factory=CMCursor)
# End class CMConnection
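
# Usage sketch (an in-memory database keeps the demo self-contained):
if __name__ == '__main__':
    conn = sqlite3.connect(':memory:', factory=CMConnection)
    with conn.cursor() as cur:
        cur.execute("SELECT 1 AS one")
        print(cur.fetchall())  # -> [{'one': 1}]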
|
[
"hproctor@lasllc.com"
] |
hproctor@lasllc.com
|
0d5894429e093898d91010ad4c8b2ec5d49d08ca
|
d883bda370722eb014ac4589602fbde725044777
|
/Final Project/decision_tree.py
|
a43c342d01fccbcfb5a67e71d85a7a4cdb9c840b
|
[] |
no_license
|
vvv44/AIFinalProject
|
4e74307d51487369f7868a2092bb65dc11019b27
|
4d58b48747ec438782caa203ad8ac9f059462af9
|
refs/heads/master
| 2022-06-04T02:41:30.075382
| 2020-05-02T19:24:01
| 2020-05-02T19:24:01
| 260,759,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,382
|
py
|
import numpy as np
import csv
import pandas as pd
class DecisionTreeNode(object):
# Constructor
def __init__(self, att, thr, left, right):
self.attribute = att
self.threshold = thr
# left and right are either binary classifications or references to
# decision tree nodes
self.left = left
self.right = right
def print_tree(self,indent=''):
# If prints the right subtree, corresponding to the condition x[attribute] > threshold
# above the condition stored in the node
if self.right in [0,1]:
print(indent+' ','class=',self.right)
else:
self.right.print_tree(indent+' ')
print(indent,'if x['+str(self.attribute)+'] <=',self.threshold)
if self.left in [0,1]:
print(indent+' ','class=',self.left)
else:
self.left.print_tree(indent+' ')
class DecisionTreeClassifier(object):
# Constructor
def __init__(self, max_depth=10, min_samples_split=10, min_accuracy =1):
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_accuracy = min_accuracy
def fit(self,x,y):
self.root = self._id3(x,y,depth=0)
def predict(self,x_test):
pred = np.zeros(len(x_test),dtype=int)
for i in range(len(x_test)):
pred[i] = self._classify(self.root,x_test[i])
return pred
#this method checks several thresholds per attribute and chooses the one with highest information gain
    def _best_threshold_generate(self,x,y, orig_entropy):
best_thrs = np.zeros(x.shape[1])
best_gains = np.zeros(x.shape[1])
for i in range(x.shape[1]):
#We first generate several random values for an attribute
thr = np.random.uniform(min(x[:,i]),max(x[:,i]),x.shape[1])
entropy_thr = np.zeros(thr.shape[0])
#We now check the entropy to get info gain of each attribute and threshold
for j in range(thr.shape[0]):
less = x[:,i] <= thr[j]
more = ~ less
entropy_thr[j] = self._entropy(y[less], y[more])
#Get the gains of the attribute and choose the threshold that yields the best gain
gains = orig_entropy - entropy_thr
best_gains[i] = np.max(gains)
best_thrs[i] = thr[np.argmax(gains)]
best_att = np.argmax(best_gains)
#Return a tuple containing the index of the best attribute, and the best threshold (from that attribute)
return (best_att, best_thrs[best_att])
def _id3(self,x,y,depth):
if y.size==0:
return
mean_val = np.mean(y) #check for possible empty y array
if depth >= self.max_depth or len(y) <= self.min_samples_split or max([mean_val,1-mean_val])>=self.min_accuracy:
return int(round(mean_val))
orig_entropy = self._entropy(y, [])
# thr = np.mean(x,axis=0)
# entropy_attribute = np.zeros(len(thr))
# for i in range(x.shape[1]):
# less = x[:,i] <= thr[i]
# more = ~ less
# entropy_attribute[i] = self._entropy(y[less], y[more])
# gain = orig_entropy - entropy_attribute
#
# #print('Gain:',gain)
# best_att = np.argmax(gain)
        best_att_thr = self._best_threshold_generate(x,y,orig_entropy)
        #We extract the threshold from the second element and the attribute from the first
best_thr = best_att_thr[1]
#We extract the best attribute index
best_att = best_att_thr[0]
#We proceed to divide values
less = x[:,best_att] <= best_thr
more = ~ less
#Will have to divide x and y into xl, yl and xr, yr to give to the node, so that it gives a reference to a decision tree
return DecisionTreeNode(best_att, best_thr, self._id3(x[less],y[less],depth+1), self._id3(x[more],y[more],depth+1))
def _entropy(self,l,m):
ent = 0
for p in [l,m]:
if len(p)>0:
pp = sum(p)/len(p)
pn = 1 -pp
if pp<1 and pp>0:
ent -= len(p)*(pp*np.log2(pp)+pn*np.log2(pn))
ent = ent/(len(l)+len(m))
return ent
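
    # The method above returns the size-weighted binary entropy of the two
    # partitions l and m:
    #   H = sum over p in {l, m} of (|p|/N) * -(p1*log2(p1) + p0*log2(p0)),
    # with N = |l| + |m|. Sanity check: _entropy([1, 0], []) == 1.0, the
    # maximal entropy of a 50/50 split.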
def _classify(self, dt_node, x):
if dt_node in [0,1]:
return dt_node
if x[dt_node.attribute] <= dt_node.threshold:
return self._classify(dt_node.left, x)
else:
return self._classify(dt_node.right, x)
def display(self):
print('Model:')
self.root.print_tree()
def processCSV():
with open('placement_data.csv') as file:
read = csv.reader(file)
lines = list(read)
for row in lines:
if(row[1]=='M'):
row[1] = 1
else:
row[1] = 0
del row[3]
del row[4]
del row[4]
del row[5]
if(row[5]=='No'):
row[5] = 0
else:
row[5] = 1
del row[7]
del row[9]
if(row[8]== 'Placed'):
row[8] = 1
else:
row[8] = 0
del lines[0]
writer = csv.writer(open('placement_data_cleaned.csv', 'w'))
writer.writerows(lines)
def processIncomeCSV():
with open('income_evaluation.csv') as file:
read = csv.reader(file)
lines = list(read)
for row in lines:
if(row[9]==' Male' or row[9]=='Male'):
row[9] = 1
else:
row[9] = 0
del row[1]
del row[1]
del row[1]
del row[2]
del row[2]
del row[2]
del row[4]
del row[4]
del row[5]
if(row[5] == " <=50K" or row[5] == "<=50K"):
row[5] = 0
else:
row[5] = 1
#we make white be one and other be 0
if(row[2] == " White" or row[2] == "White"):
row[2] = 1
else:
row[2] = 0
del lines[0]
writer = csv.writer(open('income_evaluation_cleansed.csv', 'w',newline=''))
writer.writerows(lines)
#Code for the first dataset
# with open('placement_data_cleaned.csv') as file:
# read = csv.reader(file)
# lines = list(read)
# x = np.array(lines).astype(np.float)
# y = x[:,8]
# x = np.delete(x,-1,axis=1)
# x = np.delete(x,0,axis = 1)
#code for the second dataset
processIncomeCSV()  # build the cleansed CSV before reading it below
with open('income_evaluation_cleansed.csv') as file:
read = csv.reader(file)
lines = list(read)
    x = np.array(lines).astype(float)  # np.float is removed in modern NumPy
y = x[:,5]
x = np.delete(x,-1,axis = 1)
#Split data into training and testing
ind = np.random.permutation(len(y))
split_ind = int(len(y)*0.8)
x_train = x[ind[:split_ind]]
x_test = x[ind[split_ind:]]
y_train = y[ind[:split_ind]]
y_test = y[ind[split_ind:]]
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
train_pred = model.predict(x_train)
test_pred = model.predict(x_test)
train_acc = np.sum(train_pred==y_train)/len(train_pred)
print('train accuracy:', train_acc)
test_acc = np.sum(test_pred==y_test)/len(test_pred)
print('test accuracy:', test_acc)
#Prun the tree, by getting rid of unnecessary comparisons(nodes), collapsing those
#which children classify to the same number. (Done for decision trees)
def post_prunning_decision_tree(root):
if isinstance(root,int):
return root
#Do left subtree then right subtree
root.left = post_prunning_decision_tree(root.left)
root.right = post_prunning_decision_tree(root.right)
#if classifications are same
if root.left==root.right:
#Give value to substitute the root for
new_root = root.left
temp = root
root = None
del(temp)
return new_root
else:
return root
post_prunning_decision_tree(model.root)
model.display()
|
[
"noreply@github.com"
] |
vvv44.noreply@github.com
|
3524e927dc7c490515bcea4140b896f22ca600bf
|
2376c131bf62f305ad3b67f00c602b37d1d09265
|
/Fitory/FitApp/forms.py
|
802f3eb0133fc6b038588c7139ed87c22c05f3d2
|
[] |
no_license
|
dvdgatik/FitoryWeb
|
e85f009479e2af0918ab38c72a1eb329f8c9cf1b
|
1964691fca943ad404cd83781be69d4576650085
|
refs/heads/master
| 2023-02-05T20:21:14.420245
| 2020-12-31T11:45:59
| 2020-12-31T11:45:59
| 325,788,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
from django import forms
from django.forms import ModelForm, Textarea,SelectDateWidget
from image_cropping import ImageCropWidget,ImageRatioField
from .models import Estado, Ciudad, Servicio, Actividad, Club, ServicioClub, ActividadClub, ActividadHorario, Sucursal, Foto, EvaluacionSucursal, Cliente, EvaluacionCliente, Favorito, Horario, RegistroHorario, Sesion, Subscripcion, Visita, PagoSucursal, Usuario
class cropFotoClub(forms.ModelForm):
class Meta:
model = Club
        fields = ['foto', 'fotocrop']  # a list keeps the field order deterministic
widgets = {
'foto': ImageCropWidget,'fotocrop':ImageRatioField('foto','200x200'),
}
class cropLogoSucursal(forms.ModelForm):
class Meta:
model = Sucursal
        fields = ['logo', 'logocrop']
widgets = {
'logo': ImageCropWidget,'logocrop':ImageRatioField('logo','200x200'),
}
class cropFotoCliente(forms.ModelForm):
class Meta:
model = Cliente
        fields = ['foto', 'fotocrop']
widgets = {
'foto': ImageCropWidget,'fotocrop':ImageRatioField('foto','200x200'),
}
|
[
"davgatik@gmail.com"
] |
davgatik@gmail.com
|
a4750e44ec2f69260818178da844f2c7d29155d3
|
b6aa4252e01c86e1f5bad2cf8e867b40bd3ebde0
|
/projects/migrations/0007_auto_20190210_1457.py
|
f2d6ab02ce1751ed84c5c9855e70895721a8a06e
|
[] |
no_license
|
degerli/wagtail-portfolio-1
|
2b0a2d576d706caad035a47a465e4a5d9999f405
|
69fd69a9533743e263f9d21d9aeac099ffa77812
|
refs/heads/master
| 2020-04-25T00:50:24.169432
| 2019-02-19T19:44:03
| 2019-02-19T19:44:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
# Generated by Django 2.1.5 on 2019-02-10 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0006_auto_20190210_1454'),
]
operations = [
migrations.AlterField(
model_name='individualprojectspage',
name='technology_used',
field=models.CharField(blank=True, choices=[('WP', 'WordPress'), ('PS', 'Prestashop'), ('DJ', 'Django')], max_length=255),
),
]
|
[
"raf@webconexus.nl"
] |
raf@webconexus.nl
|
079dd4266aad26fae72f8b1508ad6b63f9b96300
|
3cd426bf2d766a19b90799bb62f93deaeaf993b4
|
/tic_tac_toe_oop.py
|
bb70ea5b7065b4838ae47309e0de323cab60fbc4
|
[] |
no_license
|
jpiversen/tic_tac_toe
|
7cd0ba9c55713fb6f7dde157cc67a87a0dca57d9
|
9f6d8b8cf3d4d23c65dcd4c7923ebe9984ac195a
|
refs/heads/master
| 2022-12-16T18:03:39.417270
| 2020-09-16T07:13:45
| 2020-09-16T07:13:45
| 286,309,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,676
|
py
|
from os import system, name
import random
class board:
def __init__(self):
# Define board
self.placeholder_text = " "
self.board = []
self.reset_board()
def reset_board(self):
self.board = [self.placeholder_text for i in range(9)]
def print_board(self):
str = """
| {} | {} | {} |
--- --- ---
| {} | {} | {} |
--- --- ---
| {} | {} | {} |
""".format(*self.board)
print(str)
def place_symbol(self, index, symbol, verbose = True):
if self.board[index] == self.placeholder_text:
self.board[index] = symbol
return True
else:
if verbose:
print("This space is taken. Please enter another number. ")
return False
class game:
def __init__(self, p1, p2, board, visible = True):
self.visible = visible
self.game_over = False
self.is_winner = False
self.winner = None
self.board = board
self.p1 = p1
self.p2 = p2
self.current_player = self.p1
self.games_played = 0
# Welcome message
if self.visible:
print("Welcome to a game of Tic Tac Toe! Press 'q' to quit or 'h' for help.")
self.board.print_board()
def clear(self):
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def print_help(self):
help_txt = """
You are now playing a game of Tic Tac Toe.
The 3x3 grid is numerated like the numpad at a phone: 1 is at the top left and 9 is at the bottom right, like this: \n
| 1 | 2 | 3 |
--- --- ---
| 4 | 5 | 6 |
--- --- ---
| 7 | 8 | 9 | \n
Choose a number to place your symbol.
The first player to get three in a row - either horizontally, vertically or diagonally - wins.
If the board is full and nobody won yet, then it's a tie.
"""
self.clear()
print(help_txt)
input("Press 'enter' to go back to the game.")
self.clear()
self.board.print_board()
def check_winner(self):
current_board = self.board.board
        row1 = (current_board[0] == current_board[1] == current_board[2]) and self.board.placeholder_text not in current_board[0:3]
        row2 = (current_board[3] == current_board[4] == current_board[5]) and self.board.placeholder_text not in current_board[3:6]
        row3 = (current_board[6] == current_board[7] == current_board[8]) and self.board.placeholder_text not in current_board[6:9]
col1 = (current_board[0] == current_board[3] == current_board[6]) and self.board.placeholder_text not in current_board[0:9:3]
col2 = (current_board[1] == current_board[4] == current_board[7]) and self.board.placeholder_text not in current_board[1:9:3]
col3 = (current_board[2] == current_board[5] == current_board[8]) and self.board.placeholder_text not in current_board[2:9:3]
diagonal1 = (current_board[0] == current_board[4] == current_board[8]) and current_board[4] != self.board.placeholder_text
diagonal2 = (current_board[2] == current_board[4] == current_board[6]) and current_board[4] != self.board.placeholder_text
# Check if there's a winner
self.is_winner = row1 or row2 or row3 or col1 or col2 or col3 or diagonal1 or diagonal2
self.winner = self.current_player
if self.is_winner:
self.game_over = True
if self.visible:
print(self.current_player.symbol + " won! Congratulations!")
print()
elif self.board.placeholder_text not in current_board:
self.game_over = True
if self.visible:
print("Board is full. It's a tie.")
print()
else:
# Change player's turn
if self.current_player == self.p1:
self.current_player = self.p2
else:
self.current_player = self.p1
if self.game_over:
self.give_reward()
def give_reward(self):
if self.winner == self.p1:
self.p1.get_reward(10)
self.p2.get_reward(-10)
elif self.winner == self.p2:
self.p1.get_reward(-10)
self.p2.get_reward(10)
else:
# Draw is better for p2, as p1 has a higher prob of winning
self.p1.get_reward(0)
self.p2.get_reward(2)
def quit_game(self):
print("Goodbye!")
self.game_over = True
quit()
def play(self):
while not self.game_over:
if self.visible:
print(self.current_player.symbol + "'s turn!")
move_finished = False
while not move_finished:
choice = self.current_player.choose_move(board = self.board.board, free_pos_txt = self.board.placeholder_text)
# Quit if input is "q"
if choice == "q":
self.quit_game()
# Provide help if input is "h"
elif choice == "h":
self.print_help()
continue
# Check that input is valid
elif choice not in [str(i) for i in range(1,10)]:
print("'" + choice + "'" + " is invalid. Please enter a number between 1 and 9.")
continue
# Input is a valid move, continue with game logic
else:
# Set the choice to the same indexing as the board
choice_board_index = int(choice) -1
move_finished = self.board.place_symbol(choice_board_index, self.current_player.symbol, verbose = self.current_player.type == "human")
if self.visible:
self.clear()
self.board.print_board()
self.check_winner()
class human_player:
def __init__(self, symbol = "x"):
self.symbol = symbol
self.type = "human"
def choose_move(self, board = None, free_pos_txt = None):
# board and free_pos_txt are inputs for the AI player, but is not used for the human
choice = input("Enter your move (1-9): ").strip().lower()
return choice
def get_reward(self, reward):
pass
class ai_player:
def __init__(self, symbol = "y", er = 0.1, lr = 0.8, discount_factor = 0.95):
self.symbol = symbol
self.type = "machine"
self.explore_rate = er
self.states = []
self.states_value = {}
self.lr = lr # Learning rate
self.discount_factor = discount_factor
def get_possible_moves(self, board, free_pos_txt):
return [i+1 for i, x in enumerate(board) if x == free_pos_txt]
def choose_move(self, board, free_pos_txt):
current_board = board.copy()
possible_moves = self.get_possible_moves(current_board, free_pos_txt)
if random.uniform(0, 1) < self.explore_rate:
choice = random.choice(possible_moves) # random choice from possible_moves
else:
value_max = -999
for move in possible_moves:
next_board = current_board.copy()
next_board[move-1] = self.symbol
hash_next_board = "".join(next_board)
value = random.uniform(-5, 5) if self.states_value.get(hash_next_board) is None else self.states_value.get(hash_next_board)
if value > value_max:
value_max = value
choice = move
# Hash next board
next_board = current_board.copy()
next_board[choice -1] = self.symbol
hash_next_board = "".join(next_board)
self.states.append(hash_next_board)
choice = str(choice)
return choice
def get_reward(self, reward):
for state in reversed(self.states):
if self.states_value.get(state) is None:
self.states_value[state] = 0 #random.uniform(-5, 5)
self.states_value[state] += self.lr * (self.discount_factor * reward - self.states_value[state])
reward = self.states_value[state]
# Reset states
self.states = []
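
    # The update in get_reward() is a temporal-difference style backup applied
    # backwards over the visited states:
    #   V(s) <- V(s) + lr * (discount_factor * target - V(s))
    # where the target for the final state is the terminal reward, and for each
    # earlier state it is the freshly updated value of its successor.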
# Start game!
iterations = 200000
p1 = ai_player(symbol = "x", er = 1)
p2 = ai_player(er = 1)
print("Training the AI:")
for i in range(iterations):
if i % 100 == 0:
print(str(i).rjust(len(str(iterations))) + "/" + str(iterations))
er = 1 - (i / iterations)
if er > 0.9:
er = 1
if er < 0.1:
er = 0.1
p1.explore_rate = er
p2.explore_rate = er
tic_tac_toe = game(p1, p2, board(), visible = False)
tic_tac_toe.play()
tic_tac_toe.clear()
# Play the game!
play_again = True
while play_again:
tic_tac_toe = game(human_player(), p2, board())
tic_tac_toe.play()
play_again = input("Play again [y/n]: ") == "y"
|
[
"jan.p.iversen@gmail.com"
] |
jan.p.iversen@gmail.com
|
cbe3caf0e708d976b345b5027044c01b3de3bea0
|
8a3e3c00cecf59bcae98c1648427f61b3769a03b
|
/Intro-Python-CS1/Sprint-Challenge--Intro-Python/src/comp/comp.py
|
402a17d885e6d6319ec5d68b2b1281e8e19a18e2
|
[] |
no_license
|
nvisagan/Intro-Python-CSS1
|
af478a362033e3691dc91448d5e337dcd8dccca7
|
93b033165493335f1becb8238e8aef6fce4f7f8b
|
refs/heads/master
| 2021-05-25T21:43:08.074857
| 2020-04-07T23:04:55
| 2020-04-07T23:04:55
| 253,931,935
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,321
|
py
|
# The following list comprehension exercises will make use of the
# defined Human class.
class Human:
def __init__(self, name, age):
self.name = name
self.age = age
def __repr__(self):
return f"<Human: {self.name}, {self.age}>"
humans = [
Human("Alice", 29),
Human("Bob", 32),
Human("Charlie", 37),
Human("Daphne", 30),
Human("Eve", 26),
Human("Frank", 18),
Human("Glenn", 42),
Human("Harrison", 12),
Human("Igon", 41),
Human("David", 31),
]
# Write a list comprehension that creates a list of names of everyone
# whose name starts with 'D':
print("Starts with D:")
a = [x.name for x in humans if x.name[0]== "D"]
print(a)
# Write a list comprehension that creates a list of names of everyone
# whose name ends in "e".
print("Ends with e:")
b = [x.name for x in humans if x.name[-1]== "e"]
print(b)
# Write a list comprehension that creates a list of names of everyone
# whose name starts with any letter between 'C' and 'G' inclusive.
print("Starts between C and G, inclusive:")
c = [x.name for x in humans if x.name[0]>= "C" and x.name[0]<="G"]
print(c)
# Write a list comprehension that creates a list of all the ages plus 10.
print("Ages plus 10:")
d = [x.age+10 for x in humans]
print(d)
# Write a list comprehension that creates a list of strings which are the name
# joined to the age with a hyphen, for example "David-31", for all humans.
print("Name hyphen age:")
e = [f"{x.name}-{x.age}" for x in humans]
print(e)
# Write a list comprehension that creates a list of tuples containing name and
# age, for example ("David", 31), for everyone between the ages of 27 and 32,
# inclusive.
print("Names and ages between 27 and 32:")
f = [(x.name,x.age) for x in humans if x.age >=27 and x.age<=32]
print(f)
# Write a list comprehension that creates a list of new Humans like the old
# list, except with all the names uppercase and the ages with 5 added to them.
# The "humans" list should be unmodified.
print("All names uppercase:")
g = [Human(x.name.upper(), x.age+5) for x in humans]
print(g)
# Write a list comprehension that contains the square root of all the ages.
print("Square root of ages:")
import math
h = [math.sqrt(x.age) for x in humans]
print(h)
|
[
"noreply@github.com"
] |
nvisagan.noreply@github.com
|
254fa2b9909b4cf0ad43eb4a299b9500957c2e30
|
9011cbf7413608c32ed0bad0f5e1568b8b6b9c34
|
/forum/migrations/0012_rename_usermodel_profile.py
|
0dded3f54df8946b66be0686f143b99c70517417
|
[] |
no_license
|
apurbadh/Orion
|
e09f000c8df33b24fddd0434494a4f6af7ab0c7e
|
82231197ccf0299941e3b7631d049bbfb5953a78
|
refs/heads/main
| 2023-09-05T12:31:04.911527
| 2021-10-16T16:11:13
| 2021-10-16T16:11:13
| 400,513,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# Generated by Django 3.2.6 on 2021-08-23 00:20
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('forum', '0011_auto_20210823_0011'),
]
operations = [
migrations.RenameModel(
old_name='UserModel',
new_name='Profile',
),
]
|
[
"erikduke182@gmail.com"
] |
erikduke182@gmail.com
|
be9ce7135e19b5c6f9bee27fe3eea1c9f56a2fd0
|
7615f326b4109fe4bfc30b2aaff5784d69610f40
|
/blackj4ckPY.py
|
b4671e0fa830625b2dd26eafbd150cbbd233d099
|
[] |
no_license
|
attila5287/blackjack_app
|
bc8022048edc6c247ab12a0c6808f54eed503af1
|
baa2a1f73703b20de278c401aa272a31b5c79cf8
|
refs/heads/master
| 2023-02-08T08:36:45.149201
| 2019-10-10T01:06:23
| 2019-10-10T01:06:23
| 196,545,663
| 0
| 0
| null | 2023-02-02T06:35:23
| 2019-07-12T09:00:15
|
Python
|
UTF-8
|
Python
| false
| false
| 6,761
|
py
|
from Card_Deck_Hand import Card, Deck, Hand
# -------------- 1-ON-1 BLACKJACK ------------------------
class blackjack_1on1(Deck):
""" CLASS INH FROM DECK THAT DEALS CARDS AND REPORTS THE PROBLTY """
def __init__(self, player_name = 'Attila'):
self.player_name = player_name
self.cards = [Card(suit, rank) for suit in range(len(Card.suit_l1st)) for rank in range(1, len(Card.rank_l1st))] * 6
print('self-cards')
print(len(self.cards))
self.shuffle()
self.player = Hand(play3r_name = self.player_name)
self.de4ler = Hand('de4ler')
self.player_blackjack = 0
self.player_busted = 0
self.playerStands_dealerWins = 0
self.dealer_blackjack = 0
self.dealer_busted = 0
self.dealerStands_playerWins = 0
self.game_push = 0
print('\n...play blackjack 1-on-1...\n')
def display_with_style(self, display_message=''):
        print('\n...' + display_message + '...\n')
def deal_first_cards(self):
self.display_with_style('first cards')
self.move_cards(self.player,1)
self.move_cards(self.de4ler,1)
print('>>>--- ' + str(self.player_name) + ' --- >')
self.show_player_total()
print(' < --- de4ler --- <<<')
self.show_dealer_total()
def show_dealer_total_cardClosed(self):
open_card_list = self.de4ler.to_l1st()[0].__split2rows__()
closed_card_list = [
'┌───┐' ,
'│♦♦♦│' ,
            '│♦ ♦│' ,
'│♦♦♦│' ,
'└───┘'
]
for openCard_split2row, closedCard2row in zip(open_card_list, closed_card_list):
print(openCard_split2row,closedCard2row)
def show_player_total(self):
print(self.player)
player_total = 0
for number in self.player.blackjack_sum():
player_total += number
print('player total is: ' + str(player_total))
def deal_second_cards(self):
self.display_with_style ('second cards')
self.move_cards(self.player,1)
print('>>>--- ' + str(self.player_name) + ' --- >')
self.show_player_total()
print(' < --- de4ler --- <<<')
self.show_dealer_total_cardClosed()
self.move_cards(self.de4ler,1)
self.show_dealer1st_total()
def show_dealer1st_total(self):
dealer_total = self.de4ler.blackjack_sum()[0]
print('dealer first card only total is: ' + str(dealer_total))
def show_dealer_total(self):
dealer_total = 0
for numb3r in self.de4ler.blackjack_sum():
dealer_total += numb3r
print('dealer total is: ' + str(dealer_total))
print(self.de4ler)
def player_hits(self):
print('>>>--- ' + str(self.player_name) + ' --- >')
self.display_with_style('player hits')
self.move_cards(self.player,1)
self.show_player_total()
def dealer_hits(self):
self.display_with_style('dealer hits')
self.move_cards(self.de4ler, 1)
self.show_dealer_total()
def player_calc_total(self):
current_total = 0
for number in self.player.blackjack_sum():
current_total += number
return current_total
def dealer_calc_total(self):
dealer_total = 0
for number in self.de4ler.blackjack_sum():
dealer_total += number
return dealer_total
def the_play(self, upper_limit = 15):
pass
self.display_with_style('simulation upper limit is {}'.format(upper_limit))
if self.player_calc_total() == 21:
self.player_blackjack += 1
self.sim_totals()
elif self.player_calc_total() >= upper_limit:
self.display_with_style('player stands')
self.dealer_plays(17)
while self.player_calc_total() <= upper_limit:
self.player_hits()
if self.player_calc_total()>= 22:
self.display_with_style('player busted, dealer wins')
self.player_busted += 1
self.sim_totals()
elif self.player_calc_total() >= upper_limit:
self.display_with_style('player stands')
self.dealer_plays(17)
def dealer_plays(self, upper_limit = 17):
pass
print(' < --- de4ler --- <<<')
print('...second card up....')
self.show_dealer_total()
if self.dealer_calc_total() == 21:
self.dealer_blackjack +=1
elif self.dealer_calc_total() > self.player_calc_total():
self.display_with_style('\t\t\tdealer wins')
self.playerStands_dealerWins += 1
self.sim_totals()
elif self.dealer_calc_total() == self.player_calc_total():
self.display_with_style('\t\t\tboth sides push')
self.game_push += 1
self.sim_totals()
        while self.dealer_calc_total() <= self.player_calc_total() and self.dealer_calc_total() <= 20 :
self.dealer_hits()
if self.dealer_calc_total() >= 22:
self.display_with_style('\t\t\tdealer busted, player wins')
self.dealer_busted += 1
self.sim_totals()
elif self.dealer_calc_total() >= self.player_calc_total():
if self.dealer_calc_total() <=21:
self.display_with_style('\t\t\tdealer wins')
self.playerStands_dealerWins += 1
self.sim_totals()
def sim_results(self):
print('player wins, blackjack: {}'.format(self.player_blackjack))
print('player busted: {}'.format(self.player_busted))
print('player stands, dealer wins: {}'.format(self.playerStands_dealerWins))
print('player stands-wins, dealer busted: {}'.format(self.dealer_busted))
print('player stands, dealer push: {}'.format(self.game_push))
print('num of cards remaining in deck: {}'.format(len(self.cards)))
def sim_totals(self):
pass
print('\t\t\t\t\tplayer total is: {}'.format(self.player_calc_total()))
print('\t\t\t\t\tdealer total is: {}'.format(self.dealer_calc_total()))
def simulation_mode(self, number_of_rounds = 10):
pass
for n in range(number_of_rounds):
self.deal_first_cards()
self.deal_second_cards()
self.the_play(13)
self.player.cards = []
self.de4ler.cards = []
self.sim_results()
# --------------------------------------
if __name__ == "__main__":
pass
test_game = blackjack_1on1()
test_game.simulation_mode()
|
[
"atiturkoz@hotmail.com"
] |
atiturkoz@hotmail.com
|
4f5015ddbd668fd1c8c83c6f8a9d803f097b4891
|
43c8d27c72571d0a45a9ed5126fcee3b89f309df
|
/recommender.py
|
4c9d68dda8b238b056a00ed67105e9e21fa19f13
|
[
"Apache-2.0"
] |
permissive
|
weaponsjtu/RecommederSystem
|
592463a1e57eec5dcb80aca3de9905f08774dc0f
|
0d148117da5299f76acaae93ff096ac98ba106e6
|
refs/heads/master
| 2021-01-25T06:40:04.839769
| 2015-01-08T09:09:54
| 2015-01-08T09:09:54
| 27,113,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,506
|
py
|
import math
import time
class UserBased():
def __init__(self, matrix, user_list, item_list, sim_mat = None, related_users = None, user_item_mat = None):
# train matirx
self.matrix = matrix
self.user_list = user_list
self.item_list = item_list
# find the TOP-3 highest similarity users for each item , and overall
def refer_user(self, ten_offering):
# TOP-3 highest similarity users
print "Overall\t"
for i in range( len(self.user_list) ):
uid = self.user_list[i]
u_sim = self.sim_mat[i]
u_sim_dic = {}
for u in range( len(u_sim) ):
u_sim_dic[u] = u_sim[u]
u_sim = sorted(u_sim_dic.items(), key = lambda d: -d[1])[:3]
u_refer = []
for u in u_sim:
u_refer.append( str(self.user_list[u[0]]) )
print "%s\t%s"%(str(uid), ','.join(u_refer))
# TOP-3 highest similarity users for each item
for item_index in ten_offering:
print str(item_index) + '\t'
            for i in range( len(self.user_list) ):
uid = self.user_list[i]
u_sim = self.sim_mat[i]
u_sim_dic = {}
for u in range( len(u_sim) ):
if self.matrix[u][ item_index - 1 ] > 0:
u_sim_dic[u] = u_sim[u]
u_sim = sorted(u_sim_dic.items(), key = lambda d: -d[1])[:3]
u_refer = []
for u in u_sim:
u_refer.append( str(self.user_list[u[0]]) )
print "%s\t%s"%(str(uid), ','.join(u_refer))
def user_similarity(self):
print "function user_similarity"
start = time.time()
rows = len(self.matrix)
cols = len(self.matrix[0])
mat = []
for i in range(rows):
u_sim = [0] * rows
if sum( self.matrix[i] ) < 0.1:
mat.append(u_sim)
continue
for j in range(i + 1, rows):
user_a = self.matrix[i]
user_b = self.matrix[j]
if sum( user_a ) < 0.1 and sum( user_b ) < 0.1:
continue
sim = 0
for k in range(cols):
#if item_index != (k + 1):
# sim = sim + user_a[k] * user_b[k]
sim = sim + user_a[k] * user_b[k]
if sim > 0:
sim = sim * 1.0 / ( math.sqrt( sum(user_a) ) * math.sqrt( sum(user_b) ) )
u_sim[j] = sim
for j in range(0, i):
u_sim[j] = mat[j][i]
mat.append(u_sim)
end = time.time()
print str( end - start ) + ' seconds'
self.sim_mat = mat
def related_users(self, K):
print "function related_users"
start = time.time()
related = {}
        rows = len(self.sim_mat)
for i in range( rows ):
u_sim = {}
for x in range(len(self.sim_mat[i])):
u_sim[x] = self.sim_mat[i][x]
u_sim = sorted(u_sim.items(), key = lambda d: -d[1])[:K]
related[i] = u_sim
end = time.time()
print str( end - start ) + ' seconds'
self.related_users = related
    def predict_user_based(self):
print "function predict_user_based"
start = time.time()
rows = len(self.matrix)
cols = len(self.matrix[0])
# user item matrix, probability
user_item_mat = []
for i in range(rows):
u_item = [0] * cols
for j in range(cols):
# based on user-similarity
u_sim = self.related_users[i]
sum_u_sim = 0
for sim in u_sim:
u_item[j] = u_item[j] + sim[1] * self.matrix[ sim[0] ][j]
sum_u_sim = sum_u_sim + sim[1]
if sum_u_sim > 0:
u_item[j] = u_item[j] * 1.0 / sum_u_sim
user_item_mat.append(u_item)
#for each item, we get a user ranking list
end = time.time()
print str( end - start ) + ' seconds'
self.user_item_mat = user_item_mat
    def ranking_users(self, user_item_mat, item_index):
print "hello"
# TODO
class ItemBased():
def __init__(self, matrix, user_list, item_list, item_sim = None, related_items = None, user_item_mat = None):
self.matrix = matrix
self.user_list = user_list
self.item_list = item_list
def item_similarity(self):
print "function item_similarity"
rows = len(self.matrix)
cols = len(self.matrix[0])
mat = []
for i in range(cols):
i_sim = [0] * cols
for j in range(i + 1, cols):
user_a = []
user_b = []
sim = 0
for k in range(rows):
user_a.append(self.matrix[k][i])
user_b.append(self.matrix[k][j])
sim = sim + self.matrix[k][i] * self.matrix[k][j]
if sim > 0:
sim = sim * 1.0 / ( math.sqrt( sum(user_a) ) * math.sqrt( sum(user_b) ) )
i_sim[j] = sim
for j in range(0, i):
i_sim[j] = mat[j][i]
mat.append(i_sim)
self.sim_mat = mat
def related_items(self, K):
related = {}
        rows = len( self.sim_mat )
        for i in range( rows ):
            i_sim = {}
            for x in range(len(self.sim_mat[i])):
                i_sim[x] = self.sim_mat[i][x]
i_sim = sorted(i_sim.items(), key = lambda d: -d[1])[:K]
related[i] = i_sim
self.related_items = related
def predict_item_based(self):
print "function predict_item_based"
rows = len(self.matrix)
cols = len(self.matrix[0])
# user item matrix, probability
user_item_mat = []
for i in range(rows):
i_item = [0] * cols
for j in range(cols):
                i_sim = self.related_items[j]  # neighbours of item j, not of user i
sum_i_sim = 0
for sim in i_sim:
i_item[j] = i_item[j] + sim[1] * self.matrix[i][ sim[0] ]
sum_i_sim += sim[1]
if sum_i_sim > 0:
i_item[j] = i_item[j] * 1.0 / sum_i_sim
user_item_mat.append(i_item)
#for each item, we get a user ranking list
self.user_item_mat = user_item_mat
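
# Usage sketch (toy data; identifiers are hypothetical, and the file itself is
# Python 2, so prints follow that syntax):
#
# matrix = [[1, 0, 1], [0, 1, 1]]   # 2 users x 3 items, binary ratings
# ub = UserBased(matrix, user_list=[101, 102], item_list=[1, 2, 3])
# ub.user_similarity()
# print ub.sim_mat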
|
[
"javawebzwp@gmail.com"
] |
javawebzwp@gmail.com
|
1cb86d8bcc4f103101dcac632edba87aa1a4f73f
|
35f91893173471ee30bf2438a60a865307b3b08d
|
/gdp/urls.py
|
106776407bebcd3893f12bd26e9a7eb9a45dd2bc
|
[] |
no_license
|
csetarun/django-basic
|
cfb9bd3178db3832162c77dfd19ded57a9e48a8e
|
4ff5ae11149f758f70f6ac28937d5de4f912cca8
|
refs/heads/master
| 2020-03-23T10:00:30.558304
| 2018-07-18T10:43:34
| 2018-07-18T10:43:34
| 141,420,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
from .views import ListCountries
from django.conf.urls import url
urlpatterns = [
url(r'^$', ListCountries.as_view()),
]
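
# A minimal sketch of the ListCountries view imported above (the model name is
# an assumption):
#
# from django.views.generic import ListView
# from .models import Country
#
# class ListCountries(ListView):
#     model = Country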
|
[
"noreply@github.com"
] |
csetarun.noreply@github.com
|
0b39fa028df21f2382d22e21b76c38dae502a0a8
|
19b7c6149efe5ad3da76479da7131c2d66293194
|
/pcap包dns解析.py
|
ad3db6eea779ebedb4ea1a5658a573bafb47969a
|
[] |
no_license
|
jitianze/yanzixu
|
7aab20183b394cd08397ae67015bd60249098cbc
|
df961cb6ee41af5e335974d49dae458a40e3f1f8
|
refs/heads/master
| 2021-01-12T06:50:58.711991
| 2017-09-30T01:06:33
| 2017-09-30T01:06:33
| 76,844,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,005
|
py
|
#!/usr/bin/python
#-*-coding:utf-8-*-
import struct
import dpkt
import sys
import socket
import binascii
import os
#mulu =raw_input("Please enter the directory you want to parse:")
DNS_A = 1 # RR types
DNS_IN = 1 # RR classes
def addr2str(addrobj):
if len(addrobj) != 4:
return "addr error!"
else:
return str(ord(addrobj[0]))+"."+str(ord(addrobj[1]))+"."+str(ord(addrobj[2]))+"."+str(ord(addrobj[3]))
def dns_response_body_parse(body): # parse the response message's body
identification = body[0:2]
    flags = body[2:4]  # DNS header flags; 'flag' is reused below as a byte cursor
num_ques = body[4:6]
num_ans_RR = body[6:8]
num_auth_RR = body[8:10]
num_addi_RR = body[10:12]
query_name = ''
ans_ip = []
flag = 12
while(ord(body[flag])!=0x0):
query_name = query_name + body[flag+1:flag+ord(body[flag])+1]
flag = flag + ord(body[flag]) + 1
try:
if ord(body[flag]) != 0x0:
query_name = query_name+'.'
except Exception, e:
print "error when parse query domain name"
#print query_name
flag = flag + 1
query_type = ord(body[flag])*256 + ord(body[flag+1])
if query_type == 0x01: # use domain query IP addr
flag = flag + 4
i = 1
answer_num = ord(num_ans_RR[0])*256 + ord(num_ans_RR[1])
while(i<=answer_num):
if ord(body[flag]) == 0xc0:
flag = flag + 2
else:
while(ord(body[flag])!=0x0):
flag = flag + ord(body[flag]) + 1
flag = flag + 1
if ( ord(body[flag])*256+ord(body[flag+1]) == DNS_A
and ord(body[flag+2])*256+ord(body[flag+3]) == DNS_IN):
flag = flag + 8
RR_data_len = ord(body[flag])*256 + ord(body[flag+1])
if RR_data_len == 4:
ans_ip.append(addr2str(body[flag+2:flag+6]))
flag = flag + ord(body[flag])*256 + ord(body[flag+1]) + 2
else:
flag = flag + 8
flag = flag + ord(body[flag])*256 + ord(body[flag+1]) + 2
i = i + 1
else:
print "query type is PTR not A"
print ("Finished, the result was in file dns-message.txt ")
return
return "%s\t%s"%(query_name,ans_ip)
def jiexi():
def GetFileList(dir, fileList):
newDir = dir
if os.path.isfile(dir):
fileList.append(dir.decode('gbk'))
elif os.path.isdir(dir):
for s in os.listdir(dir):
newDir=os.path.join(dir,s)
GetFileList(newDir, fileList)
return fileList
mulu =raw_input("Please enter the directory you want to parse:")
wenjianlist = GetFileList(mulu, [])
    #list is now a list of file paths
nr_list=[]
for i in wenjianlist:
a=file(i,"rb")
nr_list.append(a)
f_xieru = open("C:\Users\jitianze\Desktop\dns_message.txt","a")
#fw_list = map(file("for i in list","rb"),list)
for fw_list in nr_list:
pcap = dpkt.pcap.Reader(fw_list)
for ts,buf in pcap:
ethheader = buf[0:14]
dstmac = ethheader[0:6]
srcmac = ethheader[6:12]
netlayer_type = ethheader[12:14]
pktheader = buf[14:34]
trans_type = pktheader[9]
srcip = pktheader[12:16]
dstip = pktheader[16:20]
if (ord(trans_type) == 0x11): #UDP
udpheader = buf[34:42]
srcport = udpheader[0:2]
dstport = udpheader[2:4]
udplen = udpheader[4:6]
bodylen = ord(udplen[0])*256+ord(udplen[1])-8
dnsbody = buf[42:(42+bodylen)]
if (ord(dstport[0]) == 0x00 and ord(dstport[1]) == 0x35):
print "this stream has a DNS Request"
elif (ord(srcport[0]) == 0x00 and ord(srcport[1]) == 0x35):
print "this stream has a DNS Response"
# try:
# print "dnsbody:", dnsbody
jibangb = dns_response_body_parse(dnsbody)
print 'this is result:', jibangb
if jibangb != None:
f_xieru.write(jibangb + '\n')
else:
print "ibangb is none ..", jibangb
# f_xieru.writelines(dns_response_body_parse(dnsbody)) # wirte result to file
# except:
# print u"文件读取有误,捕获异常:"
else:
print "this stream has not dns"
elif (ord(trans_type) == 0x06): #TCP
tcpheader = buf[34:54]
srcport = tcpheader[0:2]
dstport = tcpheader[2:4]
print ("Finished, the result was in file dns-message.txt ")
f_xieru.close()
if __name__ == "__main__":
jiexi()
# A few recommended study sites: www.baidu.com
# you owe me 3k
|
[
"978941570@qq.com"
] |
978941570@qq.com
|
112ee0674724c5a9a38297f73f438a2f4fdcf921
|
f9ef62886912d547e05a25492d1c826d7c5b7ce4
|
/main.py
|
38159c098f450b62bae3af5c8987475757688976
|
[] |
no_license
|
DariaKutkanych/telebot1601
|
f7fa3f7f2aa533eca969691afc2731edc562a7c1
|
525ff3787190d10d6c0c07c967ee3a1efb9a6af1
|
refs/heads/master
| 2020-12-14T10:29:03.986242
| 2020-01-18T10:38:21
| 2020-01-18T10:38:21
| 234,712,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
from telebot import TeleBot
import requests
from telebot import types
import re
from flask import Flask, request
import git
app = Flask(__name__)
@app.route('/update_server', methods=['POST'])
def webhook():
if request.method == 'POST':
repo = git.Repo('https://github.com/DariaKutkanych/telebot1601.git')
origin = repo.remotes.origin
origin.pull()
return 'Updated PythonAnywhere successfully', 200
else:
return 'Wrong event type', 400
bot = TeleBot("1007314497:AAHvjuAFIIN0GpUOevA3HturouG_tyS2hQ0")
@bot.message_handler()
def handle_massage(message):
result = requests.get(f"http://api.urbandictionary.com/v0/"
f"define?term={message.text}").json()
total = "\n".join(n["definition"] for n in result["list"])
# bot.send_message(message.chat.id, f'{total}')
words = re.findall(r"[^[]*\[([^]]*)\]", total)
markup = types.ReplyKeyboardMarkup()
listy = []
for a in words:
listy.append(types.KeyboardButton(a))
for a in range(len(listy)):
if a % 3 == 0:
try:
markup.row(listy[a], listy[a + 1], listy[a + 2])
except IndexError:
try:
markup.row(listy[a], listy[a + 1])
except IndexError:
markup.row(listy[a])
continue
bot.send_message(message.chat.id, f"{total} \n Choose one word:",
reply_markup=markup)
if __name__ == "__main__":
bot.polling()
|
[
"d.goubenko@gmail.com"
] |
d.goubenko@gmail.com
|
ef0e6cf6ca9250cefe2fa77139f106ff0ef48947
|
a1bffcd8854e1843e56bb812d4d83b3161a5211e
|
/tests/unit/modules/network/eric_eccli/test_eccli_command.py
|
69d1f7282b1984162e12aa895f75537d400e9f7d
|
[] |
no_license
|
goneri/ansible.community
|
1a71f9d98c164b77f8ed2ed7f558b4963005ff8f
|
f26f612dd0a3154050d90b51a75502018c95f6e4
|
refs/heads/master
| 2020-12-29T07:47:35.353515
| 2020-01-22T17:43:18
| 2020-01-22T17:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,111
|
py
|
# (c) 2019 Ericsson.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible_collections.ansible.community.tests.unit.compat.mock import patch
from ansible_collections.ansible.community.plugins.modules import eric_eccli_command
from ansible_collections.ansible.community.tests.unit.modules.utils import set_module_args
from ..eccli_module import TestEccliModule, load_fixture
class TestEccliCommandModule(TestEccliModule):
module = eric_eccli_command
def setUp(self):
super(TestEccliCommandModule, self).setUp()
self.mock_run_commands = patch('ansible_collections.ansible.community.plugins.modules.eric_eccli_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestEccliCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_eric_eccli_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Ericsson IPOS Version'))
def test_eric_eccli_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Ericsson IPOS Version'))
def test_eric_eccli_command_wait_for(self):
wait_for = 'result[0] contains "Ericsson IPOS"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_eric_eccli_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_eric_eccli_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_eric_eccli_command_match_any(self):
wait_for = ['result[0] contains "Ericsson IPOS"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_eric_eccli_command_match_all(self):
wait_for = ['result[0] contains "Ericsson IPOS"',
'result[0] contains "Version IPOS"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_eric_eccli_command_match_all_failure(self):
wait_for = ['result[0] contains "Ericsson IPOS"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_eric_eccli_command_configure_check_warning(self):
commands = ['configure terminal']
set_module_args({
'commands': commands,
'_ansible_check_mode': True,
})
result = self.execute_module()
self.assertEqual(
result['warnings'],
['only non-config commands are supported when using check mode, not executing configure terminal'],
)
def test_eric_eccli_command_configure_not_warning(self):
commands = ['configure terminal']
set_module_args(dict(commands=commands))
result = self.execute_module()
self.assertEqual(result['warnings'], [])
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
f9b94fd16549e4335a99f5b29e47f86f3db2b938
|
fba385a1beed395472bd05d99b8e994c3b88b450
|
/Get_Save_Airfoils_Selig.py
|
fe7ebfa67e5ebd2559dbd3a1847ea7f74b747ebe
|
[
"MIT"
] |
permissive
|
irfan-gh/Panel_Methods
|
d9730871de57b4cc57ab5eac07683916e54a1900
|
a7d05d865e90bff36af06451b911bd4fa50df049
|
refs/heads/master
| 2023-08-08T21:56:17.675805
| 2020-09-27T15:05:50
| 2020-09-27T15:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
# GET AIRFOIL .DAT FILES FROM UIUC AIRFOIL SITE
# Written by: JoshTheEngineer
# Permission: Dr. Michael Selig (01/16/19)
# Started : 01/14/19
# Updated : 01/14/19 - Works as expected
#
# UIUC Airfoil Database: https://m-selig.ae.illinois.edu/ads/coord_database.html
# Importing
from bs4 import BeautifulSoup # Import the BeautifulSoup library
import re # Import regular expressions
try: # Import urllib
import urllib.request as urllib2
except ImportError:
import urllib2
# Base filepath for the UIUC airfoil website (used for accessing .dat files)
baseFlpth = "https://m-selig.ae.illinois.edu/ads/coord_seligFmt/" # Base filepath for saving
# Open the webpage and create the soup
html_page = urllib2.urlopen(baseFlpth) # Open the URL
soup = BeautifulSoup(html_page,'lxml') # Create the soup
# Loop over all relevant files and save each one
ind = 1 # Iteration counter
#links = [] # Initialize list of links for appending
for link in soup.find_all('a',attrs={'href': re.compile(r'\.dat', re.IGNORECASE)}): # Loop over all appropriate links on webpage
#links.append(link.get('href')) # Append the link to the list
urllib2.urlretrieve(baseFlpth+link.get('href'), link.get('href').rsplit('/',1)[-1]) # Get the data from the webpage, and save it to the save data file as the link name
print("Saving file %i" %ind) # Indicate the link that we are currently saving
ind = ind + 1 # Increment the counter
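# Editor's note (the file name below is illustrative, not taken from the
# site listing): rsplit('/', 1)[-1] keeps only the trailing file name, e.g.
#   'coord_seligFmt/ag04.dat'.rsplit('/', 1)[-1] -> 'ag04.dat'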
|
[
"noreply@github.com"
] |
irfan-gh.noreply@github.com
|
ecdffd9ed404030f3f8c1e61cc3ed95a75635d09
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/kuv_08497+3856/sdB_KUV_08497+3856_coadd.py
|
2a1ac0d64e1ee8a22903d7ed6955d68bab2928e7
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[133.22575,38.747683], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_KUV_08497+3856/sdB_KUV_08497+3856_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_KUV_08497+3856/sdB_KUV_08497+3856_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
ab810a2b2cc6921f5853a2df75b92560f5dd8270
|
cc241a46fc269fe69294575b9ce9cd48d3485955
|
/core/validator.py
|
10d526bfbee5accc8a69a149140fda8a701fc75b
|
[] |
no_license
|
xiami303/xiami
|
322db504e1b7ba08023ee7dbfe70bb830b0a7f77
|
c56ae851f6d5555a9f61b142ca5f12a267ff9303
|
refs/heads/master
| 2021-03-12T23:34:11.153647
| 2015-03-12T13:15:51
| 2015-03-12T13:15:51
| 20,083,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
# this is a validator of HTML
|
[
"wuhongxia303@163.com"
] |
wuhongxia303@163.com
|
24fe36866435adf25568f086ec78f306946c56a1
|
e34392bca8ad5d12a32fe9dc58ddcdf430283da4
|
/sequential.py
|
a08d589e80e4ecc4a75ab94891902365c96c7051
|
[] |
no_license
|
sharnam19/Document-Classification-Using-RNN
|
cafda6cde3b2bc897a854e00e53dd074d0c24d92
|
4092363b5c827788dd77d05cea03d8b7fb7f1f1d
|
refs/heads/master
| 2021-05-07T16:38:03.125478
| 2020-09-12T09:54:21
| 2020-09-12T09:54:21
| 108,593,763
| 1
| 0
| null | 2020-09-12T10:34:23
| 2017-10-27T20:42:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
import numpy as np
def rnn_step(x, h_prev, Wx, Wh, b):
"""
Input:
x : Input Sequence of shape (N,D)
h_prev : Previous Hidden State (N,H)
Wx : Input to Hidden Weight of shape (D,H)
Wh : Hidden to Hidden Weight of shape (H,H)
b : Bias of shape (H,)
Output:
h_next : Hidden State at Next Time Step of shape (N,H)
cache : Cached Values for Backprop
"""
h_next = np.tanh(x.dot(Wx)+h_prev.dot(Wh) + b[np.newaxis,:])
cache = (x,h_prev,Wx,Wh,h_next)
return h_next,cache
def rnn_step_backward(dOut,cache):
"""
Input
dOut: Upstream Gradients wrt h (N,H)
cache : Cached Values useful for backprop
Output:
dx: Gradients wrt x
dh_prev : Gradients wrt h_prev
dWx : Gradients wrt Wx
dWh : Gradients wrt Wh
db : Gradients wrt b
"""
x,h_prev,Wx,Wh,h_next = cache
dSq = (1-np.square(h_next))*dOut
dx = dSq.dot(Wx.T)
dWx = x.T.dot(dSq)
dh_prev = dSq.dot(Wh.T)
dWh = h_prev.T.dot(dSq)
db = np.sum(dSq,axis=0)
return dx,dh_prev,dWx,dWh,db
def word_embedding_forward(x, W):
out, cache = None, None
N = x.shape[0]
V,D = W.shape
out = W[x]
cache = (x,V)
return out, cache
def word_embedding_backward(dout, cache):
dW = None
x,V = cache
N = x.shape[0]
D = dout.shape[1]
dW = np.zeros((V, D))
np.add.at(dW, x.reshape(N), dout.reshape(N, D))
return dW
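# --- Editor's sanity check (a sketch, not part of the original module) ---
# Assuming small random shapes, this compares the analytic gradient for b
# returned by rnn_step_backward against a centered finite difference. The
# scalar loss used here, L = sum(h * dOut), is hypothetical.
if __name__ == "__main__":
    np.random.seed(0)
    N, D, H = 2, 3, 4
    x = np.random.randn(N, D)
    h_prev = np.random.randn(N, H)
    Wx = np.random.randn(D, H)
    Wh = np.random.randn(H, H)
    b = np.random.randn(H)
    dOut = np.random.randn(N, H)
    h, cache = rnn_step(x, h_prev, Wx, Wh, b)
    _, _, _, _, db = rnn_step_backward(dOut, cache)
    eps = 1e-5
    db_num = np.zeros_like(b)
    for k in range(H):
        bp, bm = b.copy(), b.copy()
        bp[k] += eps
        bm[k] -= eps
        hp, _ = rnn_step(x, h_prev, Wx, Wh, bp)
        hm, _ = rnn_step(x, h_prev, Wx, Wh, bm)
        db_num[k] = np.sum((hp - hm) * dOut) / (2 * eps)
    # relative error should be around 1e-8 or smaller for float64
    print(np.max(np.abs(db - db_num) / (np.abs(db) + np.abs(db_num) + 1e-12)))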
|
[
"sharnam19.nc@gmail.com"
] |
sharnam19.nc@gmail.com
|
4d0157e39ea590c852ee5ca71d560e90eebf2017
|
18702622356806fc5e8184983712799c6b844e0f
|
/python_basic/PycharmProjects/learnpython/functions.py
|
80ba82402870387d662ba215e58ae5e4f22f008f
|
[] |
no_license
|
NumanIbnMazid/Python
|
5db305554783f0b9bd1a741911b0dddabb9c96f5
|
dea5e36853e6c833df49f0d5abc69b33e366151d
|
refs/heads/master
| 2020-06-16T19:34:07.417135
| 2019-07-07T17:40:59
| 2019-07-07T17:40:59
| 195,680,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
# must be properly indented. Indentation rules
def say_hi(name, age):
    print("Hello " + name + ", you are " + str(age))
print("Top", "35")
#calling function
say_hi("Numan", 22)
say_hi("Mike", 33)
print("Bottom", 36)
|
[
"numanibnmazid@gmail.com"
] |
numanibnmazid@gmail.com
|
2438900e536c9969f5fc625bbbc4d068063ee041
|
20d8dde9e3a64e65c1d2ff5905d83403415396ec
|
/modules/music_player.py
|
22fd1021c2228cfa16adffb8bc31a4a7f4f2a10b
|
[] |
no_license
|
carrtesy/embedded_system_project
|
7ad7bb92b2f66b40ca39127c2e9f917b5ad3a962
|
baedcdb2594d37fd81a2ca32f5103caff0b44a73
|
refs/heads/master
| 2023-01-31T00:38:18.854120
| 2020-12-10T08:00:48
| 2020-12-10T08:00:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
import subprocess
import signal
import threading
import time, os
class MusicPlayer:
def __init__(self):
subprocess.call('pwd', shell=True)
subprocess.call('ls ./assets', shell=True)
self.process = None
def play(self, song):
if self.process is not None:
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
song_loc = "./assets/" + song
self.process = subprocess.Popen(
args = ["omxplayer", "--vol", "-3000", song_loc],
preexec_fn = os.setsid,
)
def stop(self):
print("killing proc {} pid {} os {}".format(self.process, self.process.pid, os.getpgid(self.process.pid)))
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
self.process = None
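# Editor's note (sketch): preexec_fn=os.setsid starts omxplayer in its own
# process group, which is why os.killpg() above stops the player together
# with any child processes it spawns.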
if __name__ == "__main__":
player = MusicPlayer()
player.play("calm1.mp3")
time.sleep(5)
player.stop()
|
[
"dongminkim0220@gmail.com"
] |
dongminkim0220@gmail.com
|
b817f5069575a404f5eb5ca145bbed754f5c8792
|
40799b8e2cec6a481a2923183c79e35c7f654676
|
/int_test.py
|
bde76eb3dd27975c152b2e5b15361bc0a18416ab
|
[] |
no_license
|
chengxinlun/se-finder
|
af5c145e6037a9687717a759d5463caae155b1a4
|
ffcd4f0450cbd7f5508271d0d001f892aab5c396
|
refs/heads/master
| 2021-01-10T13:37:18.213570
| 2016-02-22T12:53:24
| 2016-02-22T12:53:24
| 47,450,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
import numpy as np
from fe_temp_int import FeII_template_int
from scipy.integrate import quad
import matplotlib.pyplot as plt
int_func_class = FeII_template_int(0.0, 2000.0, 1.0, 0.0, 2000.0, 1.0)
x = np.linspace(4000.0, 5500.0, 100000)
plt.plot(x, list(map(int_func_class.func, x)))
plt.show()
plt.close()
|
[
"chengxinlun@gmail.com"
] |
chengxinlun@gmail.com
|
3a1b5cd260232af67ce4f66ab9915fe16b5c4d6e
|
a46b981266a27217e8bb2f9345a8f13c60062986
|
/ex14.py
|
eb0c8d973e93668012eaff9f996b02ee96bb3b8e
|
[] |
no_license
|
huxx-j/python-ex
|
788d674ff2d0e8494a222de77df187de10dd2b04
|
7f9f7fd30c8a0878cb9b8ca6ac3844529d34718a
|
refs/heads/master
| 2020-03-18T18:26:54.866320
| 2018-05-28T00:58:41
| 2018-05-28T00:58:41
| 135,093,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
s = '/usr/local/bin/python.exe'
s1 = s[1:].split("/",4)
s2 = ",".join(s1)
print(s2)
s3 = s.rsplit("/",1)
s4 = ",".join(s3)
print(s4)
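# Editor's sketch of the expected output (derived from the string above):
#   s2 -> 'usr,local,bin,python.exe'
#   s4 -> '/usr/local/bin,python.exe'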
|
[
"perfekta213@gmail.com"
] |
perfekta213@gmail.com
|
513b637f309d707ae9864a831908331cb3bf9d69
|
d0db054bd2c437b741af9b8dc9fb05176a00912d
|
/manage.py
|
87402b0f5cbf25485cbddb6ce75f2e770be0583e
|
[] |
no_license
|
daubers/HackDoorMenu
|
72ae16220c53f2fad9f25dccc8b115d888380eb5
|
6bf8732a082470b456bb29e93b1b447bed7509d5
|
refs/heads/master
| 2020-06-05T09:18:45.643756
| 2014-06-10T20:28:13
| 2014-06-10T20:28:13
| 20,156,300
| 1
| 0
| null | 2014-06-10T17:43:36
| 2014-05-25T14:21:58
|
Python
|
UTF-8
|
Python
| false
| false
| 255
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HackDoorMenu.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"matt@daubers.co.uk"
] |
matt@daubers.co.uk
|
be1b47f8e5db8863598580cf9d003925704e3d68
|
e9fd34f8d78ad69c3586557fd3d0ff2d66b23f02
|
/finishRoboFogExport.py
|
b09fc8689a75ba067ae33abe7b8dc5e2c26d6565
|
[] |
no_license
|
LettError/robofogExport
|
878021681d7df090138563086318ca186bba2b26
|
1406dfbfb0aee0d3716da3d1fcdede849c585ff7
|
refs/heads/master
| 2021-01-01T03:44:46.116479
| 2016-04-17T17:05:21
| 2016-04-17T17:05:21
| 56,446,460
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
# finish the export from RoboFog.
"""
put the background glyphs in the right place
"""
def cleanupBackground(f):
moved = []
for g in f:
if g.name[0] == "#":
# do we have a foreground companion?
other = g.name.replace("#", "")
if other not in f:
print "background glyph %s has no foreground"%g.name
continue
bg = f[other].getLayer("background")
bg.clear()
moved.append(g.name)
print bg
bg.appendGlyph(g)
for name in moved:
f.removeGlyph(name)
f = CurrentFont()
cleanupBackground(f)
print 'done'
|
[
"erik@letterror.com"
] |
erik@letterror.com
|
3001443877633db13aaffaceac1c2e45cbcdaa2c
|
89c0ffc249cb9df44040841f5b639c4487aa0a74
|
/web/talks/management/commands/update_all_talks.py
|
4b03dfab115262bc02db6bcbe216ddd21fdbfdf0
|
[
"Apache-2.0"
] |
permissive
|
vtalks/vtalks.net
|
161bb0b46199a4b7e81997560198500c940a0be5
|
80fb19ff9684e0854c6abe5f0eef73e80ec326a6
|
refs/heads/master
| 2018-09-01T13:18:08.236948
| 2018-07-25T02:34:10
| 2018-07-25T02:34:10
| 112,273,942
| 2
| 0
|
Apache-2.0
| 2018-07-19T00:51:10
| 2017-11-28T02:18:29
|
Python
|
UTF-8
|
Python
| false
| false
| 785
|
py
|
from datetime import date
from datetime import datetime
from datetime import timedelta
from django.utils import timezone
from django.core import management
from django.core.management.base import BaseCommand
from talks.models import Talk
class Command(BaseCommand):
help = 'Update all talks on the database.'
def handle(self, *args, **options):
today = date.today()
yesterday = today-timedelta(days=1)
datetime_updated_at = datetime.strptime(str(yesterday), "%Y-%m-%d")
datetime_updated_at = datetime_updated_at.replace(tzinfo=timezone.utc)
talks = Talk.published_objects.filter(updated__lte=datetime_updated_at).order_by('-updated')
for talk in talks:
management.call_command("update_talk", talk.youtube_url)
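# Editor's usage note (a sketch, assuming the repo's standard Django
# manage.py layout):
#   python manage.py update_all_talks
# re-runs `update_talk` for every published talk last updated more than a
# day ago.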
|
[
"repejota@gmail.com"
] |
repejota@gmail.com
|
193330416b0d6f460cc33d93bbe928afd007ae08
|
d2509e6d77789993530830e12b15d709831f6c06
|
/array1.py
|
796113e1ca7f141621ed98718b406133236bdfb0
|
[] |
no_license
|
jawnhoang/LessonsWithDaHomies
|
08725537ff03600aabda9cee230d9b8eb0e88cd5
|
25ea7eaeb3e12ccb4bda56af0f57e5e516fd6e10
|
refs/heads/main
| 2023-06-16T12:34:53.181614
| 2021-07-13T19:40:12
| 2021-07-13T19:40:12
| 383,579,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
# # fill an array with 100 integers, of multiples of 3s
# #declare array
# exArr =[]
# for i in range(100): #this should go from 0-99
# if i % 3 == 0:
# exArr.append(i)
#
# print(exArr)
#
# #remove all elements that end with a 5
# for i in exArr:
# if i % 5 == 0:
# exArr.remove(i)
#
# print(exArr)
# remove elements
x = []
#fill an array
for i in range(10):
x.append(i)
print(x)
#remove first element inside list
x.remove(0)
print(x)
#remove an element using its index
del x[0]
print(x)
#remove last element
del x[-1]
print(x)
#remove element in the middle with known index
del x[3] #or del x[-4] <- starts from right to left
print(x)
for i in x:
if i == 6:
x.remove(i)
print(x)
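# Editor's note (sketch): removing items from a list while iterating over it
# can skip elements; the single-match removal above happens to work, but a
# list comprehension is the safer general form:
#   x = [i for i in x if i != 6]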
|
[
"noreply@github.com"
] |
jawnhoang.noreply@github.com
|
f3b39a7c3cc0ea7050dcfead11919b8c2437c76b
|
2499d071c0c354d8d7ec800244ac964824756e4e
|
/ipcamera5.py
|
c317dc37883c6bb1b61f18185de310981cd19cec
|
[] |
no_license
|
harshvardhan33/ImageProcessing
|
959d88aca920f817f814aa493e1e436d01af29e5
|
6d137d9873731cf8f03d0d758308c145a15ff7d7
|
refs/heads/master
| 2023-06-10T22:36:30.324033
| 2021-06-26T06:34:58
| 2021-06-26T06:34:58
| 260,647,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 12:37:22 2020
@author: harshvardhan
"""
import cv2
video = cv2.VideoCapture(0)
address = "http://192.168.29.112:8080/video"
video.open(address)
if not video.isOpened():
print("Error reading video file")
frame_width = int(video.get(3))
frame_height = int(video.get(4))
size = (frame_width, frame_height)
result = cv2.VideoWriter('ycrcb.avi',cv2.VideoWriter_fourcc(*'MJPG'), 10, size)
while(True):
ret, frame = video.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb)
if ret == True:
result.write(frame)
cv2.imshow('Frame', frame)
if cv2.waitKey(1) & 0xFF == ord('s'):
break
else:
break
video.release()
result.release()
# Closes all the frames
cv2.destroyAllWindows()
print("The video was successfully saved")
|
[
"noreply@github.com"
] |
harshvardhan33.noreply@github.com
|
d7604731d4f62ad742ee171ac900078bcbe1aeb9
|
7933620fcfc544c44b7b0d9d0c0c980465b86a70
|
/participantes/urls.py
|
32cd775bee002325151e6b86c642822389c979a8
|
[] |
no_license
|
pachecomuoz-pedro/Hackaton_2019
|
c77b721f537035047fbbb2dbae76a08eeadd76ca
|
0dd29167a9b390cf577caf7aad65023834a5f07e
|
refs/heads/master
| 2020-04-09T21:37:30.372003
| 2018-12-06T02:50:01
| 2018-12-06T02:50:01
| 160,607,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from django.urls import path
from participantes.views import entrar,lista_participantes,registro_participante,editar_participante,eliminar_participante,mostrar_mapa,carga_municipio
urlpatterns = [
path('', entrar, name='participantes'),
path('lista', lista_participantes, name='lista_participantes'),
path('mapa', mostrar_mapa, name='mostrar_mapa'),
path('registrar', registro_participante, name='registro_participante'),
path('editar/<int:id>', editar_participante, name='editar_participante'),
path('eliminar/<int:id>', eliminar_participante, name='eliminar_participante'),
path('carga_mun', carga_municipio, name='carga_municipio'),
]
|
[
"pachecomuoz_pedro@yahoo.com"
] |
pachecomuoz_pedro@yahoo.com
|
3d842aa4e62c82f865264b01e0f6d25b58ab2546
|
d4cdc6c9e2580b2011d63f6d62f70ab9e13cd317
|
/sld-dashboard/app/helpers/converter.py
|
a2bdb7cace1f5fe277b4e417f8993dae10948627
|
[
"MIT"
] |
permissive
|
timezombi/Stack-Lifecycle-Deployment
|
75cc92bc0267953039f0d66c7c219a8d444817c8
|
d84241099fb44762476b4201a2fc195e76975e26
|
refs/heads/master
| 2023-07-13T11:11:35.001371
| 2021-08-20T13:35:14
| 2021-08-20T13:35:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
import ast
def convert_to_dict(data):
result = []
for key, val in data.items():
try:
check = ast.literal_eval(val)
except:
continue
if isinstance(check, dict):
data[key] = check
result.append(data)
elif isinstance(check, list):
data[key] = check
result.append(data)
if not len(result):
return data
return result[0]
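# Editor's usage sketch (the sample input below is hypothetical, not from
# the repo): string-encoded literals are parsed in place, plain strings are
# left untouched.
if __name__ == "__main__":
    sample = {"name": "demo", "tags": "['a', 'b']", "opts": "{'x': 1}"}
    print(convert_to_dict(sample))
    # -> {'name': 'demo', 'tags': ['a', 'b'], 'opts': {'x': 1}}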
|
[
"{ID}+{username}@users.noreply.github.com"
] |
{ID}+{username}@users.noreply.github.com
|
21a8b679743967c19a4d6d958380256532a3929d
|
fa8036fd416aecab3f1ca617acf0989f032f02ce
|
/abc162/C.py
|
568b921d6090c303822258c9f02c9ac29a107550
|
[] |
no_license
|
MitsuruFujiwara/Atcoder
|
e2e2e82014e33e3422ea40eca812c6fdd8bcaaaa
|
bc73c4cd35a80c106d0e9b14cee34a064d89d343
|
refs/heads/master
| 2022-12-14T23:50:56.843336
| 2020-09-17T22:25:57
| 2020-09-17T22:25:57
| 252,980,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from math import gcd
K = int(input())
ans = 0
for a in range(1,K+1):
for b in range(1,K+1):
for c in range(1,K+1):
ans += gcd(gcd(a,b),c)
print(ans)
|
[
"fujiwara52jp@gmail.com"
] |
fujiwara52jp@gmail.com
|
3856aa50b182d6597a5a7fb4aeed9d94f6ab3a0f
|
a6add24cdf8a3428d69d647feae92175cf66b40c
|
/test/test_puts_demo.py
|
aaca9f2f37e99e3b813fd0c07963e932cdc2415c
|
[
"BSD-3-Clause",
"NCSA"
] |
permissive
|
jriehl/llvmpy
|
06cb662fb4361d7dc6fae9e067113b680d5dd596
|
d2364404c7d8b6d376b4185ddd982d0d9748038d
|
refs/heads/master
| 2021-01-18T17:59:12.119304
| 2012-11-15T17:53:24
| 2012-11-15T17:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
#! /usr/bin/env python
# ______________________________________________________________________
'''The goal of this test is to build and demonstrate the following
LLVM assembly code using the API:
@msg = internal constant [15 x i8] c"Hello, world.\\0A\\00"
declare i32 @puts(i8 *)
define i32 @not_really_main() {
%cst = getelementptr [15 x i8]* @msg, i32 0, i32 0
call i32 @puts(i8 * %cst)
ret i32 0
}
'''
import llvm.core as lc
import llvm.ee as le
# ______________________________________________________________________
def main (*args, **kws):
m = lc.Module.new(b'demo_module')
i8 = lc.Type.int(8)
i32 = lc.Type.int(32)
i8ptr = lc.Type.pointer(i8)
puts_ty = lc.Type.function(i32, [i8ptr])
puts_decl = m.add_function(puts_ty, b'puts')
hello_fn_ty = lc.Type.function(i32, [])
hello_fn = m.add_function(hello_fn_ty, b'hello_fn')
bb = hello_fn.append_basic_block(b'entry')
builder = lc.Builder.new(bb)
    # Was having a devil of a time using stringz(), since it returns a
# value of type [15 x i8], as opposed to [15 x i8]*. The weird
# part is that global variables seem to become pointer types when
# used inside functions.
# See: http://comments.gmane.org/gmane.comp.compilers.llvm.devel/28601
hello_str = lc.Constant.stringz(b'Hello, world.\n')
hello_var = m.add_global_variable(hello_str.type, b'msg')
# Required a patch to get this to work.
# XXX Need to extend patch to other constant constructors in core.py.
hello_var._set_initializer(hello_str)
zero = lc.Constant.int(i32, 0)
cst = builder.gep(hello_var, [zero, zero], b'cst')
builder.call(puts_decl, [cst])
builder.ret(zero)
print(str(m.__str__()))
ee = le.ExecutionEngine.new(m)
ee.run_function(hello_fn, [])
# ______________________________________________________________________
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
# ______________________________________________________________________
# End of test_puts_demo.py
|
[
"jriehl@spaceship.com"
] |
jriehl@spaceship.com
|
4632a80711b229a9ae8b9947f18494f68b594ed6
|
892785b084dde0a996e3d31246e1336335e41815
|
/deployment/pkgs/bin/sample
|
1d7489dc5a96c33c2339b0ad17ec2536791dd811
|
[] |
no_license
|
RyanBacastow/splitter
|
5598090be1065687f54ee8a4d80836e533612e11
|
013869a40286f99facb09461fa5c8d3449c5c8b6
|
refs/heads/master
| 2022-12-11T08:43:02.046776
| 2020-08-31T14:40:25
| 2020-08-31T14:40:25
| 289,096,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
#!/var/lang/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from sample import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ryan.bacastow@tmsw.com"
] |
ryan.bacastow@tmsw.com
|
|
d4803c4b5e71f53cb79dc8d47e3a297374733c42
|
5cfe09ff0079e8e39fc3bcfb7a4a7ba0803150de
|
/Python/grafica.py
|
a4b1acd8fbb5c04d6a81a25cea9033ecaeab55d6
|
[] |
no_license
|
MauricioD13/Proyecto1_Codigos
|
bf89b27cab3c91de3271382c896c211c825ec7d8
|
378076bcf9bdc36348cee9faa683be00048547ba
|
refs/heads/master
| 2023-01-19T10:07:06.039576
| 2020-11-24T16:22:31
| 2020-11-24T16:22:31
| 298,728,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,683
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
import statistics
import mplcursors
def guardar(archivo,time,voltage0,voltage1,voltage2):
labels=[]
labels=archivo.readline().split("\t")
#labels[1]=labels[1].replace("\n","")
i=0
for line in archivo:
linea=line.split("\t")
temp=linea[0].split(",")
time.append(float(temp[0]+"."+temp[1]))
temp=linea[1].split(",")
voltage0.append(float(temp[0]+"."+temp[1]))
temp=linea[3].split(",")
voltage1.append(float(temp[0]+"."+temp[1]))
temp=linea[5].split(",")
voltage2.append(float(temp[0]+"."+temp[1]))
i+=1
if(i>=20000):
break
return labels
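# Editor's note (sketch): the split-on-comma trick in guardar() rebuilds
# locale-formatted decimals, e.g. the pieces ("1", "234") taken from "1,234"
# become float("1" + "." + "234") == 1.234.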
option=input("Opcion?: ")
if(option=="ganancia"):
archivo0=open("./datos/Ganancia_1mV_80Hz.txt","r");
#~/Documents/Python/Proyecto/
#parameter=sys.argv[1].split(".")
#file_write=open(parameter[0]+"_fixed.txt","w")
time0=[]
voltage0=[]
time1=[]
voltage1=[]
time2=[]
voltage2=[]
labels0=guardar(archivo0,time0,voltage0,voltage1,voltage2)
archivo0.close()
time_array0=np.array(time0[1000:5000])
voltage_array0=np.array(voltage0[1000:5000])
time_array1=np.array(time1[1000:5000])
voltage_array1=np.array(voltage1[1000:5000])
time_array2=np.array(time2[1000:5000])
voltage_array2=np.array(voltage2[1000:5000])
fig, ax=plt.subplots(3,1)
fig.subplots_adjust(hspace=0.5)
print("MUESTRAS:",len(time0))
ax[0].plot(time_array0,voltage_array0,label="Primera Etapa: Entrada diferencial") # Plot some data on the axes.
ax[0].grid(True)
ax[0].set_ylabel("Voltage")
ax[0].set_xlabel("Tiempo")
ax[0].set(ylim=(1,3))
ax[0].set_title("Primera Etapa")
ax[1].plot(time_array0,voltage_array1,'g',label="Segunda Etapa: Filtro") # Plot some data on the axes.
ax[1].grid(True)
ax[1].set_ylabel("Voltage")
ax[1].set_xlabel("Tiempo")
ax[1].set(ylim=(1,3))
ax[1].set_title("Segunda Etapa")
ax[2].plot(time_array0,voltage_array2,'r',label="Tercera Etapa: Filtro")
ax[2].grid(True)
ax[2].set_ylabel("Voltage")
ax[2].set_xlabel("Tiempo")
ax[2].set(ylim=(1,3))
ax[2].set_title("Tercera Etapa")
mplcursors.cursor(multiple=True).connect(
"add", lambda sel: sel.annotation.draggable(False))
plt.show()
elif(option=="manejo"):
archivo0=open("./datos/Manejo_80Hz_30mV.txt","r");
#~/Documents/Python/Proyecto/
#parameter=sys.argv[1].split(".")
#file_write=open(parameter[0]+"_fixed.txt","w")
time0=[]
voltage0=[]
time1=[]
voltage1=[]
time2=[]
voltage2=[]
labels0=guardar(archivo0,time0,voltage0,voltage1,voltage2)
archivo0.close()
time_array0=np.array(time0[0:500])
voltage_array0=np.array(voltage0[0:500])
voltage_array1=np.array(voltage1[0:500])
voltage_array2=np.array(voltage2[0:500])
fig, ax=plt.subplots(1,3)
print("MUESTRAS:",len(time0))
ax[0].plot(time_array0,voltage_array0,'r',label="Primera Etapa: Entrada diferencial") # Plot some data on the axes.
ax[0].set_ylabel("Voltage")
ax[0].set_xlabel("Tiempo")
ax[0].set_title("Primera Etapa")
ax[0].grid(True)
ax[0].set(ylim=(0,5.5))
ax[1].plot(time_array0,voltage_array1,'g',label="Segunda Etapa: Filtro") # Plot some data on the axes.
ax[1].set_title("Segunda Etapa")
ax[1].grid(True)
ax[1].set(ylim=(0,5.5))
ax[1].set_ylabel("Voltage")
ax[1].set_xlabel("Tiempo")
ax[2].set_title("Tercera Etapa")
ax[2].plot(time_array0,voltage_array2,label="Tercera Etapa: Filtro")
ax[2].grid(True)
ax[2].set(ylim=(0,5.5))
ax[2].set_ylabel("Voltage")
ax[2].set_xlabel("Tiempo")
#plt.grid(True)
#ax[1].set_ylabel(labels1[1])
#ax[1].set_xlabel(labels1[0])
mplcursors.cursor()
plt.show()
elif(option=="comun"):
archivo0=open("./datos/Ganancia_comun_1mV_80Hz.txt","r");
time_common=[]
voltage_common=[]
labels=[]
labels=archivo0.readline().split("\t")
for line in archivo0:
linea=line.split("\t")
temp=linea[0].split(",")
time_common.append(float(temp[0]+"."+temp[1]))
temp=linea[1].split(",")
voltage_common.append(float(temp[0]+"."+temp[1]))
archivo0.close()
time_array0=np.array(time_common[0:5000])
voltage_array0=np.array(voltage_common[0:5000])
average=0
for i in time_common:
average=i+average
average=average/len(time_common)
fig, ax=plt.subplots()
common_gain=(average/0.001)-200
print(f"MUESTRAS:{len(time_common)}, Promedio: {average}: Ganancia comun: {common_gain} CMRR:{20*math.log10(1000/common_gain)}")
ax.plot(time_array0,voltage_array0,label="Entrada modo comun") # Plot some data on the axes.
plt.grid(True)
ax.set_ylabel("Voltage")
ax.set_xlabel("Tiempo")
plt.legend(loc='upper left')
plt.show()
elif(option=="ruido"):
archivo0=open("./datos/Distorsión_1m_80Hz.txt","r");
time_common=[]
voltage_common=[]
labels=[]
labels=archivo0.readline().split("\t")
for line in archivo0:
linea=line.split("\t")
temp=linea[0].split(",")
time_common.append(float(temp[0]+"."+temp[1]))
temp=linea[1].split(",")
voltage_common.append(float(temp[0]+"."+temp[1]))
archivo0.close()
time_array0=np.array(time_common[0:1000])
voltage_array0=np.array(voltage_common[0:1000])
average=0
for i in time_common:
average=i+average
average=average/len(time_common)
fig, ax=plt.subplots()
common_gain=(average/0.001)-200
print(f"MUESTRAS:{len(time_common)}, Promedio: {average}: Ganancia comun: {common_gain} CMRR:{20*math.log10(1000/common_gain)}")
ax.plot(time_array0,voltage_array0) # Plot some data on the axes.
plt.grid(True)
ax.set_ylabel("Potencia [dB]")
ax.set_title("Power Spectrum")
ax.set_xlabel("Frecuencia [Hz]")
mplcursors.cursor(multiple=True).connect(
"add", lambda sel: sel.annotation.draggable(False))
plt.show()
|
[
"mdavid_cuello@javeriana.edu.co"
] |
mdavid_cuello@javeriana.edu.co
|
d067020ff739438aeb20ffea7e8a9e17bfabef8d
|
4f96e9ebc0ee5bd99b1e01cedcb6ce3eb5a83874
|
/Gzuis.py
|
6e8760977958ff91a26e992084e37d20ef192e95
|
[] |
no_license
|
hbisneto/Gzuis
|
e764c4057ce45406a552ef1632385d1ebe8e56c6
|
a12d9d525300392b445eb77109f5bc2e1ec12d49
|
refs/heads/main
| 2023-03-03T00:52:12.621609
| 2021-02-08T21:03:14
| 2021-02-08T21:03:14
| 337,183,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
# Libraries and other dependencies
import Palavra
import LoveCard
from datetime import date
AnoAtual = date.today().year
SoftwareName = "GZUIS"
Version = "1.0"
CopyrightName = "Heitor Bisneto."
Space = " "
print("="*80)
print(f'[{SoftwareName}] - Em Execução...')
print("="*80)
print("Nome:", SoftwareName)
print("Versão:", Version)
print("Criado por:", CopyrightName)
if AnoAtual == 2021:
print("Copyright ©", AnoAtual, "|", CopyrightName, "All rights reserved.")
else:
print("Copyright © 2021 -", AnoAtual, "|", CopyrightName, "All rights reserved.")
print("")
MatrizOpc = ['LOVECARD', 'DWORD']
Count = int()
AboutItem = str()
Space = " "
UserInput = int()
# Command functions
def CartaAmor():
LoveCard.LCFunc()
def PalavraDia():
print("="*80)
print(f'{Space} >> Palavra do dia <<')
print("="*80)
Palavra.Process()
def MyApp():
    # User input:
Opc = int(input(f'<{SoftwareName}/Opção:> <{SoftwareName}/'))
UserInput = Opc
if UserInput == 1:
CartaAmor()
elif UserInput == 2:
PalavraDia()
# Command list
print("="*80)
print(f'{Space} >> Digite o número da opção desejada <<')
print("="*80)
print(" COMANDO FUNÇÃO DO COMANDO")
print("="*80)
for i in MatrizOpc:
Count = Count + 1
if Count == 1:
AboutItem = f'{Space} Abrir função "Carta de amor"'
elif Count == 2:
AboutItem = f'{Space} Abrir função "Palavra do Dia"'
print(f'<{SoftwareName}/{Count}> {i}{AboutItem}')
print("="*80)
MyApp()
|
[
"noreply@github.com"
] |
hbisneto.noreply@github.com
|
0dd18912dbc944902d7ec38e966c65cba6b73979
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-iotedge/huaweicloudsdkiotedge/v2/model/batch_confirm_configs_new_request.py
|
371de812400930aa867c1cfd0c328c4bb1858b17
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,737
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchConfirmConfigsNewRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'node_id': 'str',
'ia_id': 'str',
'body': 'ConfirmIaConfigsRequestBody'
}
attribute_map = {
'node_id': 'node_id',
'ia_id': 'ia_id',
'body': 'body'
}
def __init__(self, node_id=None, ia_id=None, body=None):
"""BatchConfirmConfigsNewRequest
The model defined in huaweicloud sdk
        :param node_id: Edge node ID
        :type node_id: str
        :param ia_id: Module ID of the edge-side third-party application
:type ia_id: str
:param body: Body of the BatchConfirmConfigsNewRequest
:type body: :class:`huaweicloudsdkiotedge.v2.ConfirmIaConfigsRequestBody`
"""
self._node_id = None
self._ia_id = None
self._body = None
self.discriminator = None
self.node_id = node_id
self.ia_id = ia_id
if body is not None:
self.body = body
@property
def node_id(self):
"""Gets the node_id of this BatchConfirmConfigsNewRequest.
        Edge node ID
:return: The node_id of this BatchConfirmConfigsNewRequest.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""Sets the node_id of this BatchConfirmConfigsNewRequest.
        Edge node ID
:param node_id: The node_id of this BatchConfirmConfigsNewRequest.
:type node_id: str
"""
self._node_id = node_id
@property
def ia_id(self):
"""Gets the ia_id of this BatchConfirmConfigsNewRequest.
        Module ID of the edge-side third-party application
:return: The ia_id of this BatchConfirmConfigsNewRequest.
:rtype: str
"""
return self._ia_id
@ia_id.setter
def ia_id(self, ia_id):
"""Sets the ia_id of this BatchConfirmConfigsNewRequest.
        Module ID of the edge-side third-party application
:param ia_id: The ia_id of this BatchConfirmConfigsNewRequest.
:type ia_id: str
"""
self._ia_id = ia_id
@property
def body(self):
"""Gets the body of this BatchConfirmConfigsNewRequest.
:return: The body of this BatchConfirmConfigsNewRequest.
:rtype: :class:`huaweicloudsdkiotedge.v2.ConfirmIaConfigsRequestBody`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this BatchConfirmConfigsNewRequest.
:param body: The body of this BatchConfirmConfigsNewRequest.
:type body: :class:`huaweicloudsdkiotedge.v2.ConfirmIaConfigsRequestBody`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchConfirmConfigsNewRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
23d043ad9da2ca665ea22dd1ccb6f5fdcd1127e0
|
5b711af0e64686c218bf25fa776d6fb461a70235
|
/00-06/00-06_main.py
|
561b11af37988dd96939cf122089f9d87558e4e7
|
[] |
no_license
|
slowhand123/region_OpenTK
|
2f835205e4c935a1e5a656aab890ac87fdaa2ede
|
53db37a12c7036ee9672617832a210de5586d574
|
refs/heads/master
| 2021-05-28T20:05:26.855525
| 2015-03-30T12:03:08
| 2015-03-30T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
#
# Reference Path : OpenTK
#
import clr
clr.AddReference("OpenTK")
from System import *
from OpenTK import *
from OpenTK.Graphics import *
from OpenTK.Graphics.OpenGL import *
from OpenTK.Input import *
class Game(GameWindow):
    # Create an 800x600 window; the title is "0-6:with IronPython"
def __init__(self):
super(Game, self).__init__()
self.Width = 800
self.Height = 600
self.Title = "0-6:with IronPython"
self.VSync = VSyncMode.On
    # Runs when the window is loaded
def OnLoad(self, e):
super(Game, self).OnLoad(e)
GL.ClearColor(Color4.Black)
GL.Enable(EnableCap.DepthTest)
    # Runs when the window is resized
def OnResize(self, e):
super(Game, self).OnResize(e)
GL.Viewport(self.ClientRectangle.X, self.ClientRectangle.Y, self.ClientRectangle.Width, self.ClientRectangle.Height)
GL.MatrixMode(MatrixMode.Projection)
projection = Matrix4.CreatePerspectiveFieldOfView(float(Math.PI / 4), float(self.Width) / float(self.Height), 1.0, 64.0)
GL.LoadMatrix(projection)
    # Runs on every frame update
def OnUpdateFrame(self, e):
super(Game, self).OnUpdateFrame(e)
        # Exit on the Escape key
if(self.Keyboard[Key.Escape]):
self.Exit()
    # Runs on every frame render
def OnRenderFrame(self, e):
super(Game, self).OnRenderFrame(e)
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit)
GL.MatrixMode(MatrixMode.Modelview)
modelview = Matrix4.LookAt(Vector3.Zero, Vector3.UnitZ, Vector3.UnitY)
GL.LoadMatrix(modelview)
GL.Begin(BeginMode.Quads)
        GL.Color4(Color4.White) # specified by color name
        GL.Vertex3(-1.0, 1.0, 4.0)
        GL.Color4(Array[float]([1.0, 0.0, 0.0, 1.0])) # specified as an array
        GL.Vertex3(-1.0, -1.0, 4.0)
        GL.Color4(0.0, 1.0, 0.0, 1.0) # specified as four float arguments
        GL.Vertex3(1.0, -1.0, 4.0)
        GL.Color4(Byte(0), Byte(0), Byte(255), Byte(255)) # specified as byte values
GL.Vertex3(1.0, 1.0, 4.0)
GL.End()
self.SwapBuffers()
if __name__ == '__main__':
window = Game()
window.Run()
window.Dispose()
|
[
"occar@hotmail.co.jp"
] |
occar@hotmail.co.jp
|
281fde97849774e3210a17505fd4660c09c7f133
|
93ba28a7808ed5a406753748fedbdbaea5f3c8b2
|
/drawVarsData_KS_CheckFlat_sync.py
|
d4ec1d1e613ae79f95cf4ec181d5c49e8173d5cf
|
[] |
no_license
|
zaixingmao/samples-plots
|
7a55005abab1e7644296d1eb2e76f603d160a37b
|
bb2371c7f664a84c454189ec648bb55630cb7565
|
refs/heads/master
| 2020-05-21T23:27:34.390427
| 2017-07-14T14:59:52
| 2017-07-14T14:59:52
| 24,139,867
| 0
| 0
| null | 2015-10-23T16:00:22
| 2014-09-17T10:17:08
|
Python
|
UTF-8
|
Python
| false
| false
| 24,839
|
py
|
#!/usr/bin/env python
import ROOT as r
import tool
from operator import itemgetter
import os
from cfg import enVars
import varsList
import optparse
import math
from array import array
import numpy
import random
from cfg import draw_sync as draw_cfg
def opts():
parser = optparse.OptionParser()
parser.add_option("--variable", dest="varName", default = 'test', help="")
parser.add_option("--signal", dest="signal", default = '', help="")
parser.add_option("--logY", dest="logy", default = True, help="")
parser.add_option("--sigBoost", dest="sigBoost", default = 1.0, help="")
parser.add_option("--nbins", dest="nbins", default = 0, help="")
parser.add_option("--useData", dest="useData", default = 'True', help="")
parser.add_option("--setMax", dest="max", default = 50, help="")
parser.add_option("--setRangeMin", dest="rangeMin", default = 0, help="")
parser.add_option("--setRangeMax", dest="rangeMax", default = 100, help="")
parser.add_option("--location", dest="location", default = '.', help="")
parser.add_option("--bTag", dest="bTag", default = 'True', help="")
parser.add_option("--predict", dest="predict", default = 'False', help="")
parser.add_option("--predictPtBin", dest="predictPtBin", default = 'False', help="")
parser.add_option("--region", dest="region", default = 'LL', help="")
parser.add_option("--thirdLeptonVeto", dest="thirdLeptonVeto", default = 'False', help="")
parser.add_option("--weight", dest="weight", default = "0.05", help="")
parser.add_option("--yMax", dest="yMax", default = 300, help="")
parser.add_option("--relaxedRegionOption", dest="relaxedRegionOption", default = 'relaxed', help="")
parser.add_option("--usePUWeight", dest="usePUWeight", default = False, action="store_true", help="use pu reweight")
options, args = parser.parse_args()
return options
def conditions(selection, region):
if selection == 1:
return 'OS', region
elif selection == 2:
return 'SS', region
elif selection == 3:
return 'OS', 'relaxed'
elif selection == 4:
return 'SS', 'relaxed'
elif selection == 5:
return 'OS', 'cut-off with iso3'
elif selection == 6:
return 'SS', 'cut-off with iso3'
def getNewLegend(legendHisto, fakeHist):
MCTotal = 0.0
for iHist, iLegend in legendHisto:
if ("H" not in iLegend) and ("observed" not in iLegend):
MCTotal += iHist.Integral(0, iHist.GetNbinsX()+1)
newLegend = []
indexForSignal = 0
for i in range(len(legendHisto)):
if 'H' not in legendHisto[i][1]:
newLegend.append(legendHisto[i])
else:
indexForSignal = i
newLegend.append(legendHisto[indexForSignal])
newLegend.append((fakeHist, 'bkg (%.2f)' %MCTotal))
return newLegend
def getCombinedError(x, x_e, y, y_e):
if x != 0:
x_part = math.pow(x_e/x,2)
else:
x_part = 0
if x_e == 0:
return 0
if y != 0:
y_part = math.pow(y_e/y,2)
else:
y_part = 0
return math.sqrt(x_part + y_part)
def bTagSelection(tree, bTag):
passCut = 0
if bTag == 'True' and tree.CSVJ1 >= 0.679 and tree.CSVJ2 >= 0.244:
passCut = 1
if bTag == 'False':
passCut = 1
if bTag == 'None' and (tree.CSVJ1 < 0.679 and tree.CSVJ2 < 0.679):
passCut = 1
if bTag == '2M' and (tree.CSVJ1 >= 0.679 and tree.CSVJ2 >= 0.679):
passCut = 1
if bTag == '1M1NonM' and (tree.CSVJ1 >= 0.679 and tree.CSVJ2 < 0.679):
passCut = 1
if bTag == '1M' and (tree.CSVJ1 > 0.679):
passCut = 1
return passCut
def getIntegralFromString(info):
integral = info[info.find('(')+1:info.find(')')]
return float(integral)
def passCut(tree, bTag, region, thirdLeptonVeto, relaxedRegionOption = "relaxed"):
isoCut = 3.0
iso_count = 3
isoMax = 10.0
isoTight = 1.5
if "INFN" in relaxedRegionOption:
isoMax = 4.0
isoCut = 1.0
isoTight = 1.0
if "very" in relaxedRegionOption:
isoCut = 1.5
if thirdLeptonVeto == 'True':
if tree.nElectrons > 0 or tree.nMuons>0:
return 0
if bTagSelection(tree, bTag) and abs(tree.eta1.at(0))<2.1 and abs(tree.eta2.at(0))<2.1:
sign_count = 0
if tree.iso1.at(0) > isoMax or tree.iso2.at(0) > isoMax:
return 0
elif tree.iso1.at(0)>isoCut and tree.iso2.at(0)>isoCut:
iso_count = 1
elif tree.iso1.at(0) < isoTight:
if 'tight' in region and tree.iso2.at(0) < isoTight:
iso_count = 0
if 'semiTight' in region and tree.iso2.at(0) > isoCut:
iso_count = 0
elif tree.iso2.at(0) < isoTight:
if 'semiTight' in region and tree.iso1.at(0) > isoCut:
iso_count = 0
if tree.charge1.at(0) - tree.charge2.at(0) == 0:
sign_count = 1
return (iso_count<<1) + sign_count + 1
else:
return 0
def findBin(x, nBins, xMin, xMax):
bin = int(nBins*(x-xMin)/(xMax-xMin))
if bin >= nBins:
return nBins-1
else:
return bin
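# Editor's sketch: findBin(5.0, 10, 0.0, 10.0) -> 5, and values at or above
# xMax clamp to the last bin, e.g. findBin(10.0, 10, 0.0, 10.0) -> 9.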
def dontUnblind(tree):
if 90 < tree.svMass.at(0) < 150:
return True
# if 70 < tree.mJJ < 150:
# return True
# if tree.BDT > 0:
# return True
return False
def getAccuDist(hist, xMin, xMax, name):
nBins = hist.GetNbinsX()
total = hist.Integral()
accuDist = r.TH1F(name, '', nBins, xMin, xMax)
for i in range(nBins):
accuDist.SetBinContent(i+1, hist.Integral(1, i+1)/total)
return accuDist
def getHistos(varName, signalSelection, logY, sigBoost, nbins, useData, max, rangeMin, rangeMax, location, bTag, predict, predictPtBin, region, thirdLeptonVeto, SF, yMax, relaxedRegionOption, usePU):
r.gStyle.SetOptStat(0)
SF = float(SF)
fileList = draw_cfg.MCFileList
histList = []
histList_4QCD = []
QCDHistList = []
QCDHistList_4KS = []
QCDHistList_withScale = []
varRange = [nbins, rangeMin, rangeMax]
nBins = 10000
Lumi = 19.7
legendHistos = []
var_background = []
scaleMCPt = 1.0
tmpFile = []
tmpTree = []
var_data_4KS = []
var_data = []
var_data_4QCD = []
histList_4KS = []
MC_Counts_0 = 0.0
MC_Counts_1 = 0.0
MC_Counts_2 = 0.0
for i in range(6):
var_data.append(r.TH1F('data_%i' %(i),"", varRange[0], varRange[1], varRange[2]))
var_data_4KS.append(r.TH1F('data_4KS_%i' %(i),"", nBins, varRange[1], varRange[2]))
if i < 5:
var_data_4QCD.append(r.TH1F('data_4QCD_%i' %(i),"", varRange[0], varRange[1], varRange[2]))
dataName = draw_cfg.dataFile
fData = r.TFile(dataName)
treeData = fData.Get('eventTree')
print 'Adding events from: %s ...' %dataName
for iEntry in range(treeData.GetEntries()):
treeData.GetEntry(iEntry)
select = passCut(treeData, bTag, region, thirdLeptonVeto, relaxedRegionOption)
if (select == 0) or (select > 6):
continue
if (select == 1) and dontUnblind(treeData):
continue
var_data[select-1].Fill(varsList.findVar(treeData, varName))
if (select != 1):
var_data_4KS[select-2].Fill(varsList.findVar(treeData, varName))
if select == 2:
var_data_4QCD[0].Fill(varsList.findVar(treeData, varName), 1.0)
elif select == 3:
var_data_4QCD[1].Fill(varsList.findVar(treeData, varName), SF)
elif select == 4:
var_data_4QCD[2].Fill(varsList.findVar(treeData, varName), 1.0)
var_data_4QCD[3].Fill(varsList.findVar(treeData, varName), SF)
legendHistos.append([])
for j in range(6):
var_data[j].SetMarkerStyle(8)
var_data[j].SetMarkerSize(0.9)
legendHistos.append([])
integral = 'observed'
if j != 0 or (region != 'tight') or (bTag == 'None'):
integral = 'observed (%.0f)' %var_data[j].Integral(0, varRange[0]+1)
legendHistos[j].append((var_data[j], integral))
for i in range(len(fileList)):
for j in range(6):
histList_4KS.append(r.TH1F('%s_%i_KS' %(fileList[i][0],j),fileList[i][0], nBins, varRange[1], varRange[2]))
histList.append(r.TH1F('%s_%i' %(fileList[i][0],j),fileList[i][0], varRange[0], varRange[1], varRange[2]))
histList_4QCD.append(r.TH1F('%s_%i_2' %(fileList[i][0],j),fileList[i][0], varRange[0], varRange[1], varRange[2]))
print 'Adding events from: %s ...' %(fileList[i][1])
tmpFile.append(r.TFile(fileList[i][1]))
tmpTree.append(tmpFile[i].Get('eventTree'))
for iEntry in range(tmpTree[i].GetEntries()):
tmpTree[i].GetEntry(iEntry)
select = passCut(tmpTree[i], bTag, region, thirdLeptonVeto, relaxedRegionOption)
if (not select) or (select > 6):
continue
if usePU:
allWeights = tmpTree[i].triggerEff*tmpTree[i].PUWeight
else:
allWeights = tmpTree[i].triggerEff
histList[6*i+select-1].Fill(varsList.findVar(tmpTree[i], varName), allWeights*tmpTree[i].xs/(tmpTree[i].initEvents))
histList_4KS[6*i+select-1].Fill(varsList.findVar(tmpTree[i], varName), allWeights*tmpTree[i].xs/(tmpTree[i].initEvents))
if select == 2:
histList_4QCD[6*i].Fill(varsList.findVar(tmpTree[i], varName), allWeights*1.0*tmpTree[i].xs/(tmpTree[i].initEvents))
elif select == 3:
histList_4QCD[6*i+1].Fill(varsList.findVar(tmpTree[i], varName), allWeights*SF*tmpTree[i].xs/(tmpTree[i].initEvents))
elif select == 4:
histList_4QCD[6*i+2].Fill(varsList.findVar(tmpTree[i], varName), allWeights*1.0*tmpTree[i].xs/(tmpTree[i].initEvents))
histList_4QCD[6*i+3].Fill(varsList.findVar(tmpTree[i], varName), allWeights*SF*tmpTree[i].xs/(tmpTree[i].initEvents))
for j in range(6):
var_background.append(r.THStack())
histList[6*i+j].SetFillColor(fileList[i][2])
histList[6*i+j].Scale(Lumi)
histList_4QCD[6*i+j].Scale(Lumi)
histList_4KS[6*i+j].Scale(Lumi)
var_background[j].Add(histList[6*i+j])
legendHistos[j].append((histList[6*i+j], '%s (%.2f)' %(fileList[i][0], histList[6*i+j].Integral(0, varRange[0]+1))))
data_i = []
MC_i = []
data_r = []
MC_r = []
e = []
MC_List = []
for i in range(3):
QCDHistList.append(r.TH1F('QCD_%i' %(i),"", varRange[0], varRange[1], varRange[2]))
QCDHistList_4KS.append(r.TH1F('QCD_%i_KS' %(i),"", nBins, varRange[1], varRange[2]))
MC_List.append(r.TH1F('MC_total_%i' %(i),"", varRange[0], varRange[1], varRange[2]))
for j in range(varRange[0]+2):
dataValue = var_data[i+1].GetBinContent(j)
dataError = var_data[i+1].GetBinError(j)
MCValue = 0
for k in range(len(fileList)):
MCValue += histList[6*k+1+i].GetBinContent(j)
if i == 0:
data_i.append(dataValue)
e.append(dataError)
MC_i.append(MCValue)
if i == 2:
data_r.append(dataValue)
MC_r.append(MCValue)
MC_List[i].SetBinContent(j, MCValue)
if dataValue - MCValue > 0:
QCDHistList[i].SetBinContent(j, dataValue - MCValue)
QCDHistList[i].SetBinError(j, dataError)
MC_List[i].Sumw2()
for j in range(nBins+2):
dataValue4KS = var_data_4KS[i].GetBinContent(j)
MCValue4KS = 0
for k in range(len(fileList)):
MCValue4KS += histList_4KS[6*k+1+i].GetBinContent(j)
if dataValue4KS - MCValue4KS > 0:
QCDHistList_4KS[i].SetBinContent(j, dataValue4KS - MCValue4KS)
ss_t = QCDHistList[0].Integral(0, varRange[0]+1)
ss_l = QCDHistList[2].Integral(0, varRange[0]+1)
os_l = QCDHistList[1].Integral(0, varRange[0]+1)
os_l_data = var_data[2].Integral(0, varRange[0]+1)
print "QCD in SS_T: %.4f" %ss_t
print "QCD in SS_L: %.4f" %ss_l
print "QCD in OS_L: %.4f" %os_l
print "Data in OS_L:%.4f" %os_l_data
print "SF: %.4f" %(ss_t/ss_l)
print "SF qcd/data: %.4f" %(os_l/os_l_data)
for i in range(4):
QCDHistList_withScale.append(r.TH1F('QCD_withScale_%i' %(i),"", varRange[0], varRange[1], varRange[2]))
for j in range(varRange[0]+2):
dataValue = var_data_4QCD[i].GetBinContent(j)
dataError = var_data_4QCD[i].GetBinError(j)
MCValue = 0
for k in range(len(fileList)):
MCValue += histList_4QCD[6*k+i].GetBinContent(j)
if dataValue - MCValue > 0:
QCDHistList_withScale[i].SetBinContent(j, dataValue - MCValue)
QCDDiff = r.TH1F('QCD_diff',"", varRange[0], varRange[1], varRange[2])
QCDDiff2 = r.TH1F('QCD_diff2',"", varRange[0], varRange[1], varRange[2])
QCDDiff_R2T = r.TH1F('QCDDiff_R2T',"", varRange[0], varRange[1], varRange[2])
# QCDDiff2.Sumw2()
fit1 = r.TF1("fit1","[0]", varRange[1],varRange[2])
fit1.SetParName(0,'scale')
fit1.FixParameter(0,1.0)
QCDDiff.Fit('fit1', '0EM')
fit1.SetLineStyle(2)
fit1.SetLineColor(r.kRed)
fit2 = r.TF1("fit2","[0]", varRange[1],varRange[2])
fit2.SetParName(0,'scale')
fit2.FixParameter(0,1.0)
QCDDiff2.Fit('fit2', '0EM')
fit2.SetLineStyle(2)
fit2.SetLineColor(r.kRed)
DrawSignal = False
if signalSelection != '':
var_signal = []
var_signal_4KS = []
for i in range(6):
var_signal.append(r.TH1F('%s_%i' %(signalSelection,i),"", varRange[0], varRange[1], varRange[2]))
var_signal_4KS.append(r.TH1F('%s_%i_4KS' %(signalSelection,i),"", nBins, varRange[1], varRange[2]))
signalDict = draw_cfg.signalDict
if signalSelection in signalDict:
fSignal = r.TFile(signalDict[signalSelection])
treeSignal = fSignal.Get('eventTree')
print 'Adding events from: %s ...' %(signalDict[signalSelection])
for iEntry in range(treeSignal.GetEntries()):
treeSignal.GetEntry(iEntry)
select = passCut(treeSignal, bTag, region, thirdLeptonVeto, relaxedRegionOption)
if (not select) or (select > 6):
continue
if usePU:
allWeights = treeSignal.triggerEff*treeSignal.PUWeight
else:
allWeights = treeSignal.triggerEff
var_signal[select-1].Fill(varsList.findVar(treeSignal, varName), allWeights*treeSignal.xs/(treeSignal.initEvents))
var_signal_4KS[select-1].Fill(varsList.findVar(treeSignal, varName), allWeights*treeSignal.xs/(treeSignal.initEvents))
initNEventsSignal = fSignal.Get('preselection')
for i in range(6):
var_signal[i].SetLineStyle(7)
var_signal[i].SetLineWidth(4)
var_signal[i].Scale(sigBoost*Lumi)
if sigBoost != 1:
sum = var_signal[i].Integral(0, var_signal[i].GetNbinsX()+1)
legendHistos[i].append((var_signal[i], '%sx%0.f (%.2f)' %(signalSelection, sigBoost, var_signal[i].Integral(0, varRange[0]+1))))
else:
legendHistos[i].append((var_signal[i], '%s (%.2f)' %(signalSelection, var_signal[i].Integral(0, varRange[0]+1))))
DrawSignal = True
else:
print '%s not supported, please use H260, H300 or H350' %signalSelection
scale_SS2OS = fit1.GetParameter(0)
scale_er_SS2OS = fit1.GetParError(0)
scale_relaxed2Tight = fit2.GetParameter(0)
scale_er_relaxed2Tight = fit2.GetParError(0)
QCDHistList_withScale[0].Scale(scale_SS2OS)
QCDHistList_withScale[1].Scale(scale_relaxed2Tight)
QCDHistList_withScale[2].Scale(scale_SS2OS)
QCDHistList_withScale[3].Scale(scale_relaxed2Tight)
QCDHistList_withScale[3].SetFillColor(r.TColor.GetColor(250,202,255))
QCDHistList_withScale[2].SetFillColor(r.TColor.GetColor(250,202,255))
QCDHistList_withScale[0].SetLineColor(r.TColor.GetColor(250,202,255))
QCDHistList_withScale[0].SetLineWidth(2)
QCDHistList_withScale[1].SetLineStyle(2)
QCDHistList_withScale[1].SetLineColor(r.TColor.GetColor(250,202,255))
QCDHistList_withScale[1].SetLineWidth(2)
var_background[1].Add(QCDHistList_withScale[3])
var_background[2].Add(QCDHistList_withScale[2])
legendHistos[1].append((QCDHistList_withScale[3], 'QCD (%.2f)' %QCDHistList_withScale[3].Integral(0, varRange[0]+1)))
legendHistos[2].append((QCDHistList_withScale[2], 'QCD (%.2f)' %QCDHistList_withScale[2].Integral(0, varRange[0]+1)))
allStacked = var_background[0].Clone()
QCDPredict = QCDHistList_withScale[1].Clone()
QCDPredict.SetLineStyle(1)
QCDPredict.SetLineWidth(1)
QCDPredict.SetLineColor(r.kBlack)
legendHistos[0].append((QCDPredict, 'QCD (%.0f, SF = %.3f)' %(QCDPredict.Integral(0, varRange[0]+1), SF)))
QCDPredict.SetFillColor(r.TColor.GetColor(250,202,255))
allStacked.Add(QCDPredict)
QCDHistList_withScale[1] = tool.addFakeTHStack(QCDHistList_withScale[1],var_background[0])
QCDHistList_withScale[0] = tool.addFakeTHStack(QCDHistList_withScale[0],var_background[0])
# var_data[1].Sumw2()
# MC_List[1].Sumw2()
for i in range(varRange[0]):
oldValue = var_data[2].GetBinContent(i+1)
mcValue = MC_List[1].GetBinContent(i+1)
if oldValue - mcValue > 0:
QCDDiff2.SetBinContent(i+1, (oldValue - mcValue)/oldValue)
QCDDiff2.SetBinError(i+1, MC_List[1].GetBinError(i+1)/oldValue)
print QCDDiff2.Integral(0, varRange[0]+1)
QCDDiff = var_data[2].Clone()
QCDDiff_sub = QCDHistList_withScale[2].Clone() + MC_List[1].Clone()
QCDDiff.Divide(QCDDiff_sub)
QCDDiff_R2T = var_data[1].Clone()
QCDDiff_R2T_sub = QCDHistList_withScale[3].Clone() + MC_List[0].Clone()
QCDDiff_R2T.Divide(QCDDiff_R2T_sub)
legendPosition = (0.6, 0.7, 0.90, 0.88)
l = []
r.gROOT.SetBatch(True) # to suppress canvas pop-outs
if bTag == 'True':
titleName = '1 Medium 1 Loose b-tag'
fileName = 'bTag'
elif bTag == 'False':
titleName = ''
fileName = 'all'
elif bTag == '2M':
titleName = '2 Medium b-tags'
fileName = '2MbTag'
elif bTag == '1M':
titleName = '1 Medium b-tag'
fileName = '1MbTag'
elif bTag == '1M1NonM':
titleName = '1 Medium 1 Anti-Medium b-tag'
fileName = '1M1NonMbTag'
elif bTag == 'None':
titleName = '0 b-tag'
fileName = '0bTag'
KS1 = QCDHistList_4KS[0].KolmogorovTest(QCDHistList_4KS[2])
KS2 = QCDHistList_4KS[1].KolmogorovTest(QCDHistList_4KS[2])
ks_values = []
tmpHists = []
ks_values2 = []
tmpHists2 = []
nTimes = 10000
# for i in range(nTimes):
# tool.printProcessStatus(i, nTimes, processName = 'Making Sample Histograms')
# tmpHists.append(r.TH1F('tmpHist_%i' %(i),"", nBins, varRange[1], varRange[2]))
# tmpHists[i].FillRandom(QCDHistList_4KS[2], 100)
# ks_values.append(QCDHistList_4KS[0].KolmogorovTest(tmpHists[i]))
# tmpHists2.append(r.TH1F('tmpHist2_%i' %(i),"", nBins, varRange[1], varRange[2]))
# tmpHists2[i].FillRandom(QCDHistList_4KS[2], 100)
# ks_values2.append(QCDHistList_4KS[2].KolmogorovTest(tmpHists[i]))
print ''
print 'KS Test 1: %.3f' %KS1
print 'KS Test 2: %.3f' %KS2
fakeHist = r.TH1F()
fakeHist.SetLineColor(0)
usePUWeightName = ''
if usePU:
usePUWeightName = '_usePU'
psfile = '%s_%s_%s_%s_%s%s.pdf' %(varName, fileName, signalSelection, region, relaxedRegionOption, usePUWeightName)
c = r.TCanvas("c","Test", 800, 900)
#ps = r.TPDF(psfile,112)
c.Divide(2,3)
drawOpt = ''
QCDDiff.SetTitle('Data/Background OS Relaxed Events %s (%.1f fb^{-1}); %s; Data/Background' %(titleName, Lumi,varName))
QCDDiff.SetMarkerStyle(8)
QCDDiff.SetMarkerSize(0.9)
QCDDiff.SetMaximum(4)
QCDDiff.SetMinimum(0)
QCDDiff_R2T.SetTitle('Data/Background OS Relaxed Events %s (%.1f fb^{-1}); %s; Data/Background' %(titleName, Lumi,varName))
QCDDiff_R2T.SetMarkerStyle(8)
QCDDiff_R2T.SetMarkerSize(0.9)
QCDDiff_R2T.SetMaximum(4)
QCDDiff_R2T.SetMinimum(0)
pl_1 = r.TPad("pl_1","pl_1",0.,1,0.5,0.65)
pl_1_delta = r.TPad("pl_1_delta","pl_1_delta",0.,0.65,0.5,0.45)
pl_2 = r.TPad("pl_2","pl_2",0.,0.45,0.5,0.0)
pl_1.SetMargin(1, 1, 0, 1)
pl_1_delta.SetMargin(1, 1, 0.2, 0.05)
pr_1 = r.TPad("pr_1","pr_1",0.5,1,1.,0.65)
pr_1_delta = r.TPad("pr_1_delta","pr_1_delta",0.5,0.65,1.0,0.45)
pr_2 = r.TPad("pr_2","pr_2",0.5,0.45,1.0,0.0)
pr_1.SetMargin(1, 1, 0, 1)
pr_1_delta.SetMargin(1, 1, 0.2, 0.05)
pl_1.Draw()
pl_1_delta.Draw()
pl_2.Draw()
pr_1.Draw()
pr_1_delta.Draw()
pr_2.Draw()
pl_1.cd()
r.gPad.SetTicky()
signSelection, iso = conditions(1, region)
allStacked.SetMaximum(int(yMax))
allStacked.SetTitle('CMS Preliminary %.1f fb^{-1} at 8 TeV; %s; events / bin' %(Lumi,varName))
allStacked.Draw()
var_data[0].Draw('PE same')
legendPosition = (0.43, 0.9 - 0.06*len(legendHistos[0]), 0.87, 0.9)
l.append(tool.setMyLegend(lPosition=legendPosition, lHistList=getNewLegend(legendHistos[0], fakeHist)))
l[0].Draw('same')
var_signal[0].Draw('same')
pl_1_delta.cd()
r.gPad.SetTicky()
r.gPad.SetTickx()
pl_1_delta.SetGridy(1)
bkgEst = QCDHistList_withScale[1].Clone()
delta = var_data[0].Clone()
delta.Sumw2()
bkgEst.Sumw2()
delta.Divide(bkgEst)
delta.SetMinimum(0.5)
delta.SetMaximum(1.5)
delta.GetXaxis().SetTitle(varName)
delta.GetXaxis().SetLabelSize(0.07)
delta.GetXaxis().SetTitleSize(0.07)
delta.GetYaxis().SetLabelSize(0.07)
delta.GetYaxis().SetNdivisions(5,5,0)
delta.Draw()
pList = [pr_1, pl_2, pr_2]
for k in range(1, len(pList)+1):
pList[k-1].cd()
r.gPad.SetTicky()
if k > 1 and logY == 'True':
r.gPad.SetLogy()
signSelection, iso = conditions(k+1, region)
var_background[k].SetTitle('%s %s Events %s (%.1f fb^{-1}); %s; events / bin' %(signSelection, iso, titleName, Lumi,varName))
if k < 2:
var_background[k].SetMaximum(int(yMax))
else:
var_background[k].SetMaximum(max)
var_background[k].SetMinimum(0.01)
var_background[k].Draw()
if useData == 'True':
var_data[k].Draw('PE same')
legendPosition = (0.47, 0.9 - 0.06*len(legendHistos[k]), 0.87, 0.9)
l.append(tool.setMyLegend(lPosition=legendPosition, lHistList=getNewLegend(legendHistos[k], fakeHist)))
if k == 1:
ksLegend1 = tool.setMyLegend((0.2, 0.8, 0.5, 0.9), [(QCDHistList_withScale[3], 'KS Test: %.3f' %KS1)])
ksLegend1.Draw('same')
if k == 2:
ksLegend2 = tool.setMyLegend((0.2, 0.8, 0.5, 0.9), [(QCDHistList_withScale[2], 'KS Test: %.3f' %KS2)])
ksLegend2.Draw('same')
l[k].Draw('same')
var_signal[k].Draw('same')
pr_1_delta.cd()
r.gPad.SetTicky()
r.gPad.SetTickx()
pr_1_delta.SetGridy(1)
bkgEst2 = QCDHistList_withScale[3].Clone()
delta2 = var_data[1].Clone()
delta2.Sumw2()
bkgEst2.Sumw2()
delta2.Divide(bkgEst2)
delta2.SetMinimum(0.5)
delta2.SetMaximum(1.5)
delta2.GetXaxis().SetTitle(varName)
delta2.GetXaxis().SetLabelSize(0.07)
delta2.GetXaxis().SetTitleSize(0.07)
delta2.GetYaxis().SetLabelSize(0.07)
delta2.GetYaxis().SetNdivisions(5,5,0)
delta2.Draw()
c.Update()
c.Print('%s(' %psfile)
c.Clear()
p1 = r.TPad("p1","p1",0.,1,1.,0.4)
p1_r = r.TPad("p1_r","p1_r",0.,0.39,1.,0.06)
p1.SetMargin(1, 1, 0, 1)
p1_r.SetMargin(1, 1, 0.2, 1)
p1.Draw()
p1_r.Draw()
p1.cd()
r.gPad.SetTicky()
allStacked.Draw()
var_data[0].Draw('PE same')
l[0].Draw('same')
var_signal[0].Draw('same')
p1_r.cd()
r.gPad.SetTicky()
r.gPad.SetTickx()
p1_r.SetGridy(1)
delta.Draw()
c.Print('%s)' %psfile)
print "Plot saved at %s" %(psfile)
c.Close()
op = opts()
if op.varName != 'test':
getHistos(op.varName, op.signal, op.logy, float(op.sigBoost), int(op.nbins),
op.useData, float(op.max), float(op.rangeMin), float(op.rangeMax),
op.location, op.bTag, op.predict, 'False', op.region, op.thirdLeptonVeto,
op.weight, op.yMax, op.relaxedRegionOption, op.usePUWeight)
|
[
"zaixing.mao@cern.ch"
] |
zaixing.mao@cern.ch
|
b3c2d2be4040aef38a58cee25d669a3bf9fde47c
|
40f9a3c7a1d90f9ff6ce230ff1bc9ffba2689c92
|
/setup.py
|
9049b95342383d1c4dc0933b16a039d4a6dd2bcb
|
[] |
no_license
|
daemianmack/python-markdown-video
|
f79c52fb85203b242f846fe6614901597cd4d8db
|
551d5f27402b073154b99c1aa9ec2e8aeaeff884
|
refs/heads/master
| 2021-01-18T14:11:44.620641
| 2015-11-02T13:23:31
| 2015-11-02T13:23:31
| 939,130
| 1
| 1
| null | 2015-11-02T13:23:31
| 2010-09-25T20:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 361
|
py
|
# -*- coding: utf-8 -*-
from distutils.core import setup
from mdx_video import version
setup(
name='mdx_video',
version=version,
description="Markdown 2.0 extension for easy video embedding",
author="Tyler Lesmann",
author_email="redhatcat@gmail.com",
url="http://code.tylerlesmann.com/mdx_video2",
py_modules = ["mdx_video"],
)
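# A minimal usage sketch once installed (resolving the short name 'video' to
# the mdx_video module follows Markdown 2.x's 'mdx_' lookup convention; the
# exact call below is an assumption, not taken from this file):
#
#   import markdown
#   html = markdown.markdown('http://www.youtube.com/watch?v=abc123',
#                            extensions=['video'])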
|
[
"maturburu@gmail.com"
] |
maturburu@gmail.com
|
8ec7942f489530fd78fc17adda10403d07703211
|
8fa7967bec0bf9aefd2d170113e323bffd61a0fa
|
/shop/migrations/0005_product_discounted_price.py
|
27bbbbc489c4d13faa511e2c2877df5e0ce8a2dd
|
[
"MIT"
] |
permissive
|
RitvikDayal/The-Stone-Shop
|
327395a2ddba86cd60c9483ffd1936b4e6aa197c
|
fed85d5ebb993fb1ce235f5e8a8ebc06a76fb956
|
refs/heads/master
| 2022-12-06T09:05:55.129652
| 2020-08-27T07:24:01
| 2020-08-27T07:24:01
| 290,305,600
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Generated by Django 3.0.8 on 2020-07-28 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0004_product_product_image'),
]
operations = [
migrations.AddField(
model_name='product',
name='discounted_price',
field=models.FloatField(default=None),
preserve_default=False,
),
]
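    # Note: preserve_default=False records that the default above was supplied
    # only as a one-off value for back-filling existing rows during this
    # migration; it is not kept as the model field's default afterwards.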
|
[
"ritvikr1605@gmail.com"
] |
ritvikr1605@gmail.com
|
12ffbea99224b0bf7e1abef29cd18bad1ccd3b21
|
6d44c144381f2c231f9f999aeefbea61bbeee4dc
|
/ShadingRatioModeling/ShadeModel.py
|
b1e379844d7e14eec3e2a432977e5dc03917d6b0
|
[] |
no_license
|
daigo0927/pyworks
|
8d93327e2c8da45380d5acf8497cddfcb94814fe
|
97c3af7ebc31c4857492e6b06c141ecaa7e6fa27
|
refs/heads/master
| 2021-05-01T11:50:22.796770
| 2017-05-13T17:20:38
| 2017-05-13T17:20:38
| 72,403,162
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,788
|
py
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from common.gradient import numerical_gradient
from PIL import Image
from scipy.stats import multivariate_normal
from mpl_toolkits.mplot3d.axes3d import Axes3D
from misc import *
import pdb # pdb.set_trace()
class uGMModel:
def __init__(self,
xy_lim = np.array([30, 30]),
mixture_size = 20,
frame_num = 5,
logistic_coefficient = np.array([50, 0])):
self.dimension = xy_lim.shape[0]
self.mix = mixture_size
self.xgrid = np.arange(xy_lim[0])
self.ygrid = np.arange(xy_lim[1])
self.frame = np.arange(frame_num)
# ! set basis frame : (frame_num-1)/2 : center frame
self.std_frame = (frame_num-1)/2
self.params = {}
# ! set basis frame : (frame_num-1)/2 : center frame
self.params['mus'] = np.random.rand(self.mix, self.dimension)\
* xy_lim
self.params['covs'] = np.array([np.identity(self.dimension) * xy_lim/5.
for i in range(self.mix)])
self.params['pi'] = np.random.dirichlet([3]*self.mix)
self.params['move'] = (np.random.rand(self.mix, self.dimension) - 0.5) \
* xy_lim / 10
self.logistic_coefficient = logistic_coefficient
self.Norms = None
self.q_each = None # each component density value
self.q = None # component mixture value
self.g = None # infered shade ratio
self.lossvalue = None
def predict(self):
mus_plus = np.array([self.params['mus'] \
+ self.params['move'] * (f - self.std_frame) \
for f in self.frame])
self.Norms = [Norm2Dmix(mus = mus_p,
covs = self.params['covs'],
pi = self.params['pi'])
for mus_p in mus_plus]
self.q_each = np.array([[[self.Norms[f].pdf_each(x = np.array([x, y]))
for x in self.xgrid]
for y in self.ygrid]
for f in self.frame])
self.q = np.sum(self.q_each \
* self.params['pi'].reshape(1,1,1,self.mix),
axis = 3)
a, b = self.logistic_coefficient
z = a * self.q + b
self.g = sigmoid(z)
def GenerateFrame(self, f):
mus_p = self.params['mus'] \
+ self.params['move'] * (f - self.std_frame)
Norm = Norm2Dmix(mus = mus_p,
covs = self.params['covs'],
pi = self.params['pi'])
q = np.array([[Norm.pdf(x = np.array([x, y]))
for x in self.xgrid]
for y in self.ygrid])
a, b = self.logistic_coefficient
z = a * q + b
g = sigmoid(z)
# pdb.set_trace()
return g
def loss(self, f): # f : data value (not frame)
self.predict()
        a, b = self.logistic_coefficient
z = a * self.q + b
U_q = 1/a * np.log(1 + np.exp(z)) # U function : integrated logistic sigmoid
self.lossvalue = U_q - f * self.q
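        # Since U(q) = (1/a)*log(1 + exp(a*q + b)) is the antiderivative of
        # the logistic sigmoid in q, dU/dq = sigmoid(a*q + b) = g, so the loss
        # gradient with respect to q is simply (g - f); this is the (g - f)
        # factor used in gradient_move below.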
def gradient_move(self, f):
self.loss(f = f)
mus_plus = np.array([self.params['mus'] \
+ self.params['move'] * (frm - self.std_frame) \
for frm in self.frame])
# shape(frame, ygrid, xgrid, mix, 2)
grad_move = np.array([[[ self.params['pi'].reshape(self.mix, 1) \
* (self.g[frm, y, x] - f[frm, y, x]) \
* self.q_each[frm, y, x, :].reshape(self.mix, 1) \
* (frm - self.std_frame) \
* np.linalg.solve(self.params['covs'],
(np.array([x, y]) - mus_plus[frm])) \
for x in self.xgrid]
for y in self.ygrid]
for frm in self.frame])
return grad_move
def modelplot(self, update = False):
if(update == True):
self.predict()
plt.figure(figsize = (10, 3.5*(1+self.frame.size)))
for frm in self.frame:
plt.subplot(self.frame.size+1, 2, frm*2+1)
            plt.title('approximated shade ratio')
sns.heatmap(self.g[frm], annot = False, cmap = 'YlGnBu_r',
vmin = 0, vmax = 1)
plt.subplot(self.frame.size+1, 2, frm*2+2)
            plt.title('approximated probability density')
sns.heatmap(self.q[frm], annot = False, cmap = 'YlGnBu_r')
        plt.show()
class uEpaMixModel(object):
def __init__(self,
xy_lim = np.array([30, 30]),
mixture_size = 20,
frame_num = 5,
logistic_coefficient = np.array([50, 0])):
self.dimension = xy_lim.shape[0]
self.mix = mixture_size
self.xgrid = np.arange(start = 0, stop = xy_lim[0], step = 1)
self.ygrid = np.arange(start = 0, stop = xy_lim[1], step = 1)
self.frame = np.arange(frame_num)
self.params = {}
self.params['mus'] = \
np.random.rand(self.mix, self.dimension)\
*(xy_lim)
tmp = np.array([np.identity(self.dimension)*xy_lim/0.1 \
for i in range(self.mix)])
self.params['covs'] = tmp
self.params['pi'] = np.random.dirichlet([3]*self.mix)
move = np.random.rand(self.mix, self.dimension)-0.5
self.params['move'] = move * xy_lim / 10
self.mus_plus = None
self.logistic_coefficient = logistic_coefficient
self.q = None
self.q_specific = None
# self.Epas[frame].Epas[mix]
self.Epas = None
        self.Epas_specific_frame = None
        self.predict_value = None  # used lazily by ModelPlot below
def predict_specific_frame(self, obj_frame = 0):
self.Epas_specific_frame = Epanechnikov2Dmix(mus = self.params['mus'] \
+ self.params['move'] * obj_frame,
covs = self.params['covs'],
pi = self.params['pi'])
q_and_mask = np.array([[self.Epas_specific_frame.pdf_and_mask(x = np.array([x, y]))
for x in self.xgrid]
for y in self.ygrid])
self.q_specific = q_and_mask[:,:,0]
mask = q_and_mask[:,:,1:]
# pdb.set_trace()
# return specific frame predict value, and component mask
# q.shape : (ygird ,xgrid)
# mask.shape : (ygrid, xgrid, mix)
return self.q_specific, mask
def predict(self):
self.mus_plus = np.array([self.params['mus'] + self.params['move'] * f \
for f in self.frame])
self.Epas = [Epanechnikov2Dmix(mus = mus_p,
covs = self.params['covs'],
pi = self.params['pi'])
for mus_p in self.mus_plus]
# compute from given frames
q_and_mask = np.array([[[self.Epas[f].pdf_and_mask(x = np.array([x, y]))
for x in self.xgrid]
for y in self.ygrid]
for f in self.frame])
# pdb.set_trace()
self.q = q_and_mask[:,:,:,0]
mask = q_and_mask[:,:,:,1:]
# return predict value, and component mask
# q.shape : (frame, ygird ,xgrid)
# mask.shape : (frame, ygrid, xgrid, mix)
return self.q, mask
def loss_specific_frame(self, f, obj_frame = 0):
q, mask = self.predict_specific_frame(obj_frame = obj_frame)
a, b = self.logistic_coefficient
z = a * q + b
U_q = 1/a * np.log(1 + np.exp(z))
loss = U_q - f[obj_frame] * q
return loss, mask
def loss(self, f): # f : data value
q, mask = self.predict()
a, b = self.logistic_coefficient
z = a * q + b
U_q = 1/a * np.log(1 + np.exp(z))
loss = U_q - f * q
return loss, mask
def gradient_specific_frame(self, f, obj_frame = 0):
# it will get better for estimate mus, covs, and pi at first (without move)
# return gradient of mus, covs, pi
loss, mask = self.loss_specific_frame(f = f,
obj_frame = obj_frame)
self.Epas_specific_frame
mus = self.params['mus'] + self.params['move'] * obj_frame
covs = self.params['covs']
a, b = self.logistic_coefficient
z = a * self.q_specific + b
g = sigmoid(z)
f = f[obj_frame]
dpi = np.array([[ (g[y,x] - f[y,x]) \
* self.Epas_specific_frame.pdf_each(x = np.array([x, y]))
for x in self.xgrid]
for y in self.ygrid])
dmus = np.array([[ self.params['pi'] * (g[y,x] - f[y,x]) * 2 \
* np.linalg.solve(covs, (np.array([x, y]) - mus) ).T \
* mask[y, x]
for x in self.xgrid]
for y in self.ygrid])
# dcovs = np.array([[ ]])
pdb.set_trace()
def gradient(self, f):
loss, mask = self.loss(f = f)
a, b = self.logistic_coefficient
z = a * self.q + b
g = sigmoid(z)
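    # NOTE: ModelPlot below still references self.xy and self.grid, which are
    # never set in __init__; it appears to predate the current class interface.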
def ModelPlot(self, frame = range(5), axtype='contourf'):
        if self.predict_value is None: self.predict_value = self.predict()
x = self.xy[:, 0]
y = self.xy[:, 1]
Z = self.predict_value
xgrid = x.reshape(self.grid[0], self.grid[1])
ygrid = y.reshape(self.grid[0], self.grid[1])
for f in frame:
fig = plt.figure()
ax = Axes3D(fig)
z = Z[f]
zgrid = z.reshape(self.grid[0], self.grid[1])
if(axtype == 'wireframe'): ax.plot_wireframe(x, y, z)
elif(axtype == 'contour'): ax.contour3D(xgrid, ygrid, zgrid)
elif(axtype == 'contourf'): ax.contourf3D(xgrid, ygrid, zgrid)
plt.show()
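# A minimal smoke-test sketch for uGMModel (the arguments mirror the
# constructor defaults; this usage is inferred from the methods above, not
# taken from elsewhere in the file):
#
#   model = uGMModel(xy_lim=np.array([30, 30]), mixture_size=20, frame_num=5)
#   model.predict()                  # fills model.q and model.g
#   frame0 = model.GenerateFrame(0)  # shade-ratio map for a single frame
#   model.modelplot()                # heatmaps of g and q per frame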
|
[
"tappy.hello-planet@ezweb.ne.jp"
] |
tappy.hello-planet@ezweb.ne.jp
|
771d8fdd6297663bfb24be64ff621e13d7eb2298
|
a86d1744f3b77d4dbdfc01949d24f707205dfba1
|
/Euler19.py
|
d60650fe8fe9e4a3bb9ffe6806889183d67823a1
|
[] |
no_license
|
ThomasR75/python_work
|
6da1737d5d54db654f3b022c3ea43659f90ec87d
|
0ae9d93dc2a7934b93062433e3ce136e8c8fdcea
|
refs/heads/master
| 2023-09-02T01:44:27.585714
| 2023-08-28T12:06:44
| 2023-08-28T12:06:44
| 214,607,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
#Euler 19
import datetime
year = 1901
month = 1
day = 1
counter = 0
while year < 2001:
while month < 13:
date = datetime.datetime(year, month, day)
x = datetime.datetime.weekday(date)
if x == 6:
counter = counter + 1
month = month + 1
year = year + 1
month = 1
print(counter)
#date = datetime.datetime(year, month, day)
#x = datetime.datetime.weekday(date)
#print(x)
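# A quick cross-check of the same count (datetime.weekday() returns 0=Monday
# .. 6=Sunday, so "x == 6" above tests for a Sunday on the first of a month):
#
#   import calendar
#   print(sum(1 for y in range(1901, 2001) for m in range(1, 13)
#             if calendar.weekday(y, m, 1) == 6))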
|
[
"ThomasR75@gmail.com"
] |
ThomasR75@gmail.com
|
d7f644a7dba5f32a8525416d4e6ae82d13e7e038
|
c7654051c33c72cf2da830a522495bf0b6ba6d2b
|
/get_article_info.py
|
3695824556aec90858639ebda8dd066d18ee9c81
|
[] |
no_license
|
SamuelYJKim/checkbait
|
74044ea29b24bb107e183eb2d7235c76154886e5
|
05db8968535e7342ae1e6190d3492b08b79a7645
|
refs/heads/main
| 2022-12-30T18:56:57.819089
| 2020-10-19T05:39:20
| 2020-10-19T05:39:20
| 305,045,341
| 1
| 0
| null | 2020-10-18T07:20:28
| 2020-10-18T07:20:28
| null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
# Retrieves a news article by URL and returns info about the article.
# Title, Summary, Keywords
from newsfetch.news import newspaper
from summa import summarizer
#article = "https://www.cnn.com/2020/10/17/health/us-coronavirus-saturday/index.html"
# Returns title of article with URL
def get_title(url):
return newspaper(url).headline
# Returns list of keywords with URL
def get_keywords(url, queue):
news = newspaper(url)
queue.put(news.keywords)
# return news.keywords
# Returns summary of article (100 words) with article text.
def summarize(url, queue):
news = newspaper(url)
queue.put(summarizer.summarize(news.article, words=100))
# return summarizer.summarize(news.article, words=100)
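# A minimal sketch of the intended queue-based usage (the URL is a
# placeholder, and using multiprocessing is an assumption suggested by the
# (url, queue) signatures above; a thread-safe queue would work the same way):
#
#   from multiprocessing import Process, Queue
#   q = Queue()
#   p = Process(target=get_keywords, args=('https://example.com/article', q))
#   p.start(); p.join()
#   keywords = q.get()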
|
[
"atalefard@gmail.com"
] |
atalefard@gmail.com
|
7e2e5b6da245f3631bc35adfab5d6b5c034f2293
|
4fa68c72849d96cc2e8c04649f18573b605e6e8c
|
/prymer/converter.py
|
43c186561ba5ba768413327283209efa765ab63c
|
[] |
no_license
|
jrjhealey/prymer
|
3bb1d6cd83272dcdaff810dace3fd68bfa7a6576
|
c82068cc5b560d602f4eb99af42dacd904ecc4be
|
refs/heads/master
| 2020-04-08T00:15:11.201293
| 2019-02-27T14:47:36
| 2019-02-27T14:47:36
| 158,843,944
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
# A converter function to allow multiple input sequence types
# Big thanks to Peter van Heusden for refactoring the genbank code to yield generators!
import os
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
class InputFormatException(Exception):
"""The exception to return when an input format cannot be identified or coerced (informative name)"""
def __init__(self, message):
super().__init__(message)
def yield_from_genbank(infile):
for rec in SeqIO.parse(infile, 'genbank'):
cds_features = (f for f in rec.features if f.type == 'CDS')
for feat in cds_features:
try:
header = feat.qualifiers['gene'][0] + '_' + rec.id
except KeyError:
header = feat.qualifiers['locus_tag'][0] + '_' + rec.id
header.replace(' ', '_')
yield SeqRecord(id=header, seq=feat.location.extract(rec).seq)
def convert_seqs(infile):
with open(infile, 'r') as ifh:
firstline = ifh.readline()
if os.path.splitext(infile)[1] in (".fasta", ".fa", ".fas", ".fna"):
try:
assert firstline[0] == '>'
except AssertionError:
raise InputFormatException("File extension implies fasta but the first line doesn't look like a header.")
return SeqIO.parse(infile, 'fasta')
elif os.path.splitext(infile)[1] in (".gbk", ".gb", ".genbank", ".gbff"):
try:
assert firstline.startswith("LOCUS")
except AssertionError:
raise InputFormatException("File extension implies genbank, but the first line doesn't look like a header.")
return yield_from_genbank(infile)
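# A minimal usage sketch (the file name is a placeholder): convert_seqs()
# returns an iterator of SeqRecord objects whether the input was fasta or
# genbank, so callers can iterate uniformly:
#
#   for rec in convert_seqs('primers.fasta'):
#       print(rec.id, len(rec.seq))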
|
[
"jrj.healey@gmail.com"
] |
jrj.healey@gmail.com
|
0c13a0e21cc134454428127ba79f39c2fe89e3f7
|
84f08baaab25012fe46d16a6b7c3180fb4bb1097
|
/shop/views.py
|
bd5ab57a66a1d8ffc807f879f1db859bde2457ac
|
[] |
no_license
|
caravan4eg/myshop
|
24fdccd4fd78720a0fbafa1565668f5229044d10
|
c00c12ebf09b84a7713ecf06ba5c128297b4b069
|
refs/heads/master
| 2022-10-19T20:16:54.372608
| 2020-06-11T16:26:50
| 2020-06-11T16:26:50
| 268,129,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import Category, Product
from cart.forms import CartAddProductForm
def product_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
context = {'category': category,
'categories': categories,
'products': products}
return render(request, 'shop/product/list.html', context)
def product_detail(request, id, slug):
product = get_object_or_404(Product,
id=id,
slug=slug,
available=True)
cart_product_form = CartAddProductForm()
return render(request,
'shop/product/detail.html',
{'product': product,
'cart_product_form': cart_product_form})
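# A sketch of the URLconf these views imply (the pattern names and layout are
# assumptions, not taken from this file):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.product_list, name='product_list'),
#       path('<slug:category_slug>/', views.product_list,
#            name='product_list_by_category'),
#       path('<int:id>/<slug:slug>/', views.product_detail,
#            name='product_detail'),
#   ]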
|
[
"alex.apanasik@gmail.com"
] |
alex.apanasik@gmail.com
|
e25c7e7229cb9c605258c662fe0855bcd3294a98
|
c655f659c7dcd2657aeeef0177731f41790ad780
|
/retrain.py
|
a8d9ccda035d55a7945802e155af48459015f2b1
|
[] |
no_license
|
AbdesslemOuerghemmi/Mvision
|
693b33923c8239c9e301db3f7f3af2d376ce728c
|
41529923b39a6db8230f77957be8da68b1412350
|
refs/heads/main
| 2023-01-19T03:05:01.226348
| 2020-11-20T11:54:00
| 2020-11-20T11:54:00
| 314,537,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55,866
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple transfer learning with Inception v3 or Mobilenet models.
With support for TensorBoard.
This example shows how to take an Inception v3 or Mobilenet model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector (1001-dimensional for
Mobilenet) for each image. We train a softmax layer on top of this
representation. Assuming the softmax layer contains N labels, this corresponds
to learning N + 2048*N (or 1001*N) model parameters corresponding to the
learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
```
Or, if you have a pip installation of tensorflow, `retrain.py` can be run
without bazel:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
By default this script will use the high accuracy, but comparatively large and
slow Inception v3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--architecture` flag with a
Mobilenet model. For example:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos --architecture mobilenet_1.0_224
```
There are 32 different Mobilenet models to choose from, with a variety of file
size and latency options. The first number can be '1.0', '0.75', '0.50', or
'0.25' to control the size, and the second controls the input image size, either
'224', '192', '160', or '128', with smaller sizes running faster. See
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
tf.logging.error("Image directory '" + image_dir + "' not found.")
return None
result = collections.OrderedDict()
sub_dirs = [
os.path.join(image_dir,item)
for item in gfile.ListDirectory(image_dir)]
sub_dirs = sorted(item for item in sub_dirs
if gfile.IsDirectory(item))
for sub_dir in sub_dirs:
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
tf.logging.warning('No files found')
continue
if len(file_list) < 20:
tf.logging.warning(
          'WARNING: Folder has fewer than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
tf.logging.warning(
'WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in; this gives the data set creator
      # a way of grouping photos that are close variations of each other. For
      # example, this is used in the plant disease data set to group multiple
      # pictures of the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
      if percentage_hash < validation_percentage:
        validation_images.append(base_name)
      elif percentage_hash < (testing_percentage + validation_percentage):
        testing_images.append(base_name)
      else:
        training_images.append(base_name)
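      # Stability sketch: the split depends only on the SHA-1 of the file name
      # (with any '_nohash_' suffix stripped), reduced to a percentage in
      # [0, 100), so a given image always lands in the same set and adding new
      # images never reshuffles the existing ones.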
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
def get_image_path(image_lists, label_name, index, image_dir, category):
  """Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
                        category, architecture):
  """Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
architecture: The name of the model architecture.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + architecture + '.txt'
def create_model_graph(model_info):
  """Creates a graph from saved GraphDef file and returns a Graph object.
Args:
model_info: Dictionary containing information about the model architecture.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Graph().as_default() as graph:
model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])
with gfile.FastGFile(model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(
graph_def,
name='',
return_elements=[
model_info['bottleneck_tensor_name'],
model_info['resized_input_tensor_name'],
]))
return graph, bottleneck_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract(data_url):
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
Args:
data_url: Web location of the tar file containing the pretrained model.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
    tf.logging.info('Successfully downloaded %s %d bytes.',
                    filename, statinfo.st_size)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
tf.logging.info('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor)
except Exception as e:
raise RuntimeError('Error during processing file %s (%s)' % (image_path,
str(e)))
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The output tensor for the bottleneck values.
architecture: The name of the model architecture.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, architecture)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
tf.logging.warning('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
architecture: The name of the model architecture.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
architecture: The name of the model architecture.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck_values)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness, input_width, input_height,
input_depth, input_mean, input_std):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
input_width: Horizontal size of expected input image to model.
input_height: Vertical size of expected input image to model.
input_depth: How many channels the expected input image should have.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, input_width)
precrop_height = tf.multiply(scale_value, input_height)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[input_height, input_width, input_depth])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
offset_image = tf.subtract(brightened_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')
return jpeg_data, distort_result
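# Worked example of the scaling arithmetic above: with random_crop=10 and
# random_scale=10, margin_scale = 1.1 and resize_scale_value is drawn from
# [1.0, 1.1], so the pre-crop image is between 1.1x and 1.21x the input size
# before being randomly cropped back down to input_height x input_width.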
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,
bottleneck_tensor_size):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
bottleneck_tensor_size: How many entries in the bottleneck vector.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape=[None, bottleneck_tensor_size],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal(
[bottleneck_tensor_size, class_count], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
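# For reference, the layer above adds bottleneck_tensor_size * class_count
# weights plus class_count biases, matching the "N + 2048*N" parameter count
# quoted in the module docstring for Inception v3.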
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def save_graph_to_file(sess, graph, graph_file_name):
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(graph_file_name, 'wb') as f:
f.write(output_graph_def.SerializeToString())
return
def prepare_file_system():
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if FLAGS.intermediate_store_frequency > 0:
ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
return
def create_model_info(architecture):
"""Given the name of a model architecture, returns information about it.
There are different base image recognition pretrained models that can be
retrained using transfer learning, and this function translates from the name
of a model to the attributes that are needed to download and train with it.
Args:
architecture: Name of a model architecture.
Returns:
Dictionary of information about the model, or None if the name isn't
recognized
Raises:
ValueError: If architecture name is unknown.
"""
architecture = architecture.lower()
if architecture == 'inception_v3':
# pylint: disable=line-too-long
data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
bottleneck_tensor_name = 'pool_3/_reshape:0'
bottleneck_tensor_size = 2048
input_width = 299
input_height = 299
input_depth = 3
resized_input_tensor_name = 'Mul:0'
model_file_name = 'classify_image_graph_def.pb'
input_mean = 128
input_std = 128
elif architecture.startswith('mobilenet_'):
parts = architecture.split('_')
if len(parts) != 3 and len(parts) != 4:
tf.logging.error("Couldn't understand architecture name '%s'",
architecture)
return None
version_string = parts[1]
if (version_string != '1.0' and version_string != '0.75' and
version_string != '0.50' and version_string != '0.25'):
tf.logging.error(
""""The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',
but found '%s' for architecture '%s'""",
version_string, architecture)
return None
size_string = parts[2]
if (size_string != '224' and size_string != '192' and
size_string != '160' and size_string != '128'):
tf.logging.error(
"""The Mobilenet input size should be '224', '192', '160', or '128',
but found '%s' for architecture '%s'""",
size_string, architecture)
return None
if len(parts) == 3:
is_quantized = False
else:
if parts[3] != 'quantized':
tf.logging.error(
"Couldn't understand architecture suffix '%s' for '%s'", parts[3],
architecture)
return None
is_quantized = True
data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'
data_url += version_string + '_' + size_string + '_frozen.tgz'
bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'
bottleneck_tensor_size = 1001
input_width = int(size_string)
input_height = int(size_string)
input_depth = 3
resized_input_tensor_name = 'input:0'
if is_quantized:
model_base_name = 'quantized_graph.pb'
else:
model_base_name = 'frozen_graph.pb'
model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string
model_file_name = os.path.join(model_dir_name, model_base_name)
input_mean = 127.5
input_std = 127.5
else:
tf.logging.error("Couldn't understand architecture name '%s'", architecture)
raise ValueError('Unknown architecture', architecture)
return {
'data_url': data_url,
'bottleneck_tensor_name': bottleneck_tensor_name,
'bottleneck_tensor_size': bottleneck_tensor_size,
'input_width': input_width,
'input_height': input_height,
'input_depth': input_depth,
'resized_input_tensor_name': resized_input_tensor_name,
'model_file_name': model_file_name,
'input_mean': input_mean,
'input_std': input_std,
}
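# Hedged usage sketch (added; not in the original file): create_model_info()
# turns an architecture string into download and tensor metadata.
def _demo_model_info():
  info = create_model_info('mobilenet_0.50_192')
  # Parsed from the name: input_width == 192; for MobileNets the resized
  # input tensor is named 'input:0' and the bottleneck has 1001 values.
  return info['input_width'], info['resized_input_tensor_name']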
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
input_std):
"""Adds operations that perform JPEG decoding and resizing to the graph..
Args:
input_width: Desired width of the image fed into the recognizer graph.
input_height: Desired height of the image fed into the recognizer graph.
input_depth: Desired channels of the image fed into the recognizer graph.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
"""
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
offset_image = tf.subtract(resized_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
return jpeg_data, mul_image
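# Worked example (added comment): the subtract/multiply ops above compute
# (pixel - input_mean) / input_std. With Inception's mean = std = 128, a pixel
# of 255 maps to (255 - 128) / 128 ~= 0.992 and a pixel of 0 maps to -1.0.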
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare necessary directories that can be used during training
prepare_file_system()
# Gather information about the model architecture we'll be using.
model_info = create_model_info(FLAGS.architecture)
if not model_info:
tf.logging.error('Did not recognize architecture flag')
return -1
# Set up the pre-trained graph.
maybe_download_and_extract(model_info['data_url'])
graph, bottleneck_tensor, resized_image_tensor = (
create_model_graph(model_info))
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
tf.logging.error('Only one valid folder of images found at ' +
FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
with tf.Session(graph=graph) as sess:
# Set up the image decoding sub-graph.
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(
model_info['input_width'], model_info['input_height'],
model_info['input_depth'], model_info['input_mean'],
model_info['input_std'])
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness, model_info['input_width'],
model_info['input_height'], model_info['input_depth'],
model_info['input_mean'], model_info['input_std'])
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.architecture)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(
len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,
model_info['bottleneck_tensor_size'])
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
(datetime.now(), i, train_accuracy * 100))
tf.logging.info('%s: Step %d: Cross entropy = %f' %
(datetime.now(), i, cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# Store intermediate results
intermediate_frequency = FLAGS.intermediate_store_frequency
if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
and i > 0):
intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
'intermediate_' + str(i) + '.pb')
tf.logging.info('Save intermediate result to : ' +
intermediate_file_name)
save_graph_to_file(sess, graph, intermediate_file_name)
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.test_batch_size, 'testing',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
tf.logging.info('%70s %s' %
(test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as
# constants.
save_graph_to_file(sess, graph, FLAGS.output_graph)
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='/tmp/intermediate_graph/',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
How many steps to store intermediate graph. If "0" then will not
store.\
"""
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=6000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--architecture',
type=str,
default='inception_v3',
help="""\
Which model architecture to use. 'inception_v3' is the most accurate, but
also the slowest. For faster or smaller models, choose a MobileNet with the
form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,
'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224
pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much
less accurate, but smaller and faster network that's 920 KB on disk and
takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.\
""")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
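# Hedged usage note (added): a typical invocation of this script might be
#   python <this_script>.py --image_dir ~/flower_photos --architecture mobilenet_1.0_224
# where --image_dir contains one sub-folder of example images per label.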
|
[
"noreply@github.com"
] |
AbdesslemOuerghemmi.noreply@github.com
|
01fcff1996438106ed4f432447a0a2790c2a95ff
|
e702dc1b5f37c72c49b550f9ea48c369d432fd12
|
/collection相关用法/defaultdict.py
|
dd914b40d9b06e610ef179bc5477fbd1fb975019
|
[] |
no_license
|
HIT-jixiyang/offer
|
bf0fee0a410c8b55ce66da2cc0e186ad04ccdf1d
|
486069be6f6c554f2853e1d71979a6747ca0a985
|
refs/heads/master
| 2022-12-15T09:16:48.055439
| 2020-09-20T07:47:11
| 2020-09-20T07:47:11
| 297,027,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from collections import defaultdict
# defaultdict calls its factory to create a value for any missing key.
dict1 = defaultdict(int)   # missing keys default to 0
dict2 = defaultdict(set)   # missing keys default to set()
dict3 = defaultdict(str)   # missing keys default to ''
dict4 = defaultdict(list)  # missing keys default to []
dict1[2] = 'two'           # explicit assignment bypasses the factory
print(dict1[1])  # 0 -- key 1 is created on first access
print(dict2[1])  # set()
print(dict3[1])  # '' (prints an empty line)
print(dict4[1])  # []
dict1[0] = 1
print(dict1[3])  # 0
print(dict1.items())
print(dict1.popitem())  # removes and returns the most recently inserted pair
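# Hedged follow-up example (added; not in the original file): the classic
# defaultdict idiom is grouping, where each missing key starts as an empty list.
grouped = defaultdict(list)
for word in ['apple', 'ant', 'bee']:
    grouped[word[0]].append(word)
print(grouped)  # defaultdict(<class 'list'>, {'a': ['apple', 'ant'], 'b': ['bee']})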
|
[
"jixiyang_hit@163.com"
] |
jixiyang_hit@163.com
|
03a2ffcfc9ab66c8eb58b34e9a16f24bc0741915
|
9f8d4011beca8b4e1a1b73af40ef4eb96ec2cbc2
|
/build/lib.linux-x86_64-2.7/ims/migrations/0005_auto_20150913_0927.py
|
dc468a7024661dd32721bdae38577ad693072d6a
|
[
"BSD-3-Clause"
] |
permissive
|
grovesr/django-ims
|
5650e38ae3ac9c8109fa370117b7b805a958123c
|
2e3046c7dac45831879ff578675f6c5234226f95
|
refs/heads/master
| 2022-12-05T00:14:46.068546
| 2019-09-12T15:10:46
| 2019-09-12T15:10:46
| 39,070,579
| 0
| 0
|
BSD-3-Clause
| 2022-11-22T00:45:50
| 2015-07-14T11:19:28
|
Python
|
UTF-8
|
Python
| false
| false
| 885
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ims', '0004_auto_20150912_0647'),
]
operations = [
migrations.CreateModel(
name='ProductCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('category', models.CharField(default=b'', max_length=100)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='productinformation',
name='category',
field=models.OneToOneField(null=True, blank=True, to='ims.ProductCategory', help_text=b'Category'),
preserve_default=True,
),
]
|
[
"robgroves0.gmailcom"
] |
robgroves0.gmailcom
|
f668a22354ea64ce662c8495e2344dc4f2584c25
|
75e358b563b17ded3c1396055937adc3caaa7407
|
/django_jsonfield_backport/apps.py
|
2e80922e7412ca656730b8b75564b651801472d7
|
[] |
no_license
|
bflower9/heliohostPack3.7
|
86a923ba13ab3d645fec5cb041d96bce913aa610
|
2b0a2ddde96d50cf93751256c79d6a005403095e
|
refs/heads/main
| 2023-02-19T01:30:15.020439
| 2021-01-24T00:23:43
| 2021-01-24T00:23:43
| 330,469,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
import django
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from django_jsonfield_backport import features, models
class JSONFieldConfig(AppConfig):
name = "django_jsonfield_backport"
verbose_name = _("JSONField backport from Django 3.1")
def ready(self):
if django.VERSION >= (3, 1):
return
features.connect_signal_receivers()
models.register_lookups()
|
[
"mahmudula2000@gmail.com"
] |
mahmudula2000@gmail.com
|
2456ff4041186f6715f8539ddd562a3fde99db39
|
3ec711a6c756ef79e567c39031d95fb5d6a4653e
|
/shapes.py
|
4c32491a2ddd581cbd63a6b7c38242428e0f4b47
|
[] |
no_license
|
jamcoy/Game_of_Life
|
37274fd4ee668baa54cababa4ef53cdb7a7f363e
|
236ed4d70f851c1afc8e3dfaef0a073edafa867a
|
refs/heads/master
| 2021-01-12T15:30:54.522198
| 2016-10-26T07:03:16
| 2016-10-26T07:03:16
| 71,797,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
boat = """00000
01100
01010
00100
00000"""
beacon = """000000
011000
011000
000110
000110
000000"""
blinker = """00000
00000
01110
00000
00000"""
glider = """000000
010100
001100
001000
000000"""
|
[
"james@coynemail.net"
] |
james@coynemail.net
|
6a5bc1b04d063fb91936896dda1245ccf57096ba
|
500b03fa6cb776c1d51db4a3a3aa252ddf5a50e6
|
/book_exercise/crash/Chapter 11: Testing Codes/test_cities.py
|
8f25580e00267b30f7fb53ed979e1e4b9b772c82
|
[] |
no_license
|
carloslvm/learning-python
|
b3796a0a5b751baae8c551a9f6fe262f98980691
|
07f885454cf21b7d215a58da7fcb907715e546bd
|
refs/heads/master
| 2022-07-27T21:39:11.937801
| 2022-07-09T17:47:56
| 2022-07-09T17:47:56
| 163,447,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
#!/usr/bin/python3
import unittest
from city_functions import city_country
class cityTestCase(unittest.TestCase):
def test_cities(self):
_location_ = city_country('santiago','chile')
self.assertEqual(_location_, 'Santiago, Chile')
def test_population(self):
location_population = city_country('beijing', 'china', 100000)
self.assertEqual(location_population, 'Beijing, China: 100000')
if __name__ == '__main__':
    unittest.main()
|
[
"cvaldez553@gmail.com"
] |
cvaldez553@gmail.com
|
d795cd341cfb9ade40d9854b34b82278ffc091d3
|
8f24e443e42315a81028b648e753c50967c51c78
|
/rllib/utils/gym.py
|
7a763400c9de97d74cf42e48451c0195c36fb597
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
simon-mo/ray
|
d07efdada8d05c6e10417f96e8dfc35f9ad33397
|
1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8
|
refs/heads/master
| 2023-03-06T00:09:35.758834
| 2022-12-23T18:46:48
| 2022-12-23T18:46:48
| 122,156,396
| 4
| 2
|
Apache-2.0
| 2023-03-04T08:56:56
| 2018-02-20T04:47:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,754
|
py
|
import gymnasium as gym
from typing import Optional
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
def check_old_gym_env(
env: Optional[gym.Env] = None, *, step_results=None, reset_results=None
):
# Check `reset()` results.
if reset_results is not None:
if (
# Result is NOT a tuple?
not isinstance(reset_results, tuple)
# Result is a tuple of len!=2?
or len(reset_results) != 2
# The second item is NOT a dict (infos)?
or not isinstance(reset_results[1], dict)
# Result is a tuple of len=2 and the second item is a dict (infos) and
# our env does NOT have obs space 2-Tuple with the second space being a
# dict?
or (
env
and isinstance(env.observation_space, gym.spaces.Tuple)
and len(env.observation_space.spaces) >= 2
and isinstance(env.observation_space.spaces[1], gym.spaces.Dict)
)
):
raise ValueError
# Check `step()` results.
elif step_results is not None:
if len(step_results) == 4:
raise ValueError  # a 4-tuple means the old gym step API (no `truncated` flag)
elif len(step_results) == 5:
return False
else:
raise ValueError(
"The number of values returned from `gym.Env.step([action])` must be "
"5 (new gym.Env API including `truncated` flags)! Make sure your "
"`step()` method returns: [obs], [reward], [terminated], "
"[truncated], and [infos]!"
)
else:
raise AttributeError(
"Either `step_results` or `reset_results` most be provided to "
"`check_old_gym_env()`!"
)
return False
@DeveloperAPI
def convert_old_gym_space_to_gymnasium_space(space) -> gym.Space:
"""Converts an old gym (NOT gymnasium) Space into a gymnasium.Space.
Args:
space: The gym.Space to convert to gymnasium.Space.
Returns:
The converted gymnasium.space object.
"""
from ray.rllib.utils.serialization import gym_space_from_dict, gym_space_to_dict
return gym_space_from_dict(gym_space_to_dict(space))
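# Hedged usage sketch (added; not in the original file): the conversion is a
# round-trip through RLlib's dict (de)serialization, so an old-style space
# comes back as its gymnasium equivalent. Assumes the old `gym` package is
# importable (see try_import_gymnasium_and_gym below):
#   _, old_gym = try_import_gymnasium_and_gym()
#   old_box = old_gym.spaces.Box(low=-1.0, high=1.0, shape=(3,))
#   new_box = convert_old_gym_space_to_gymnasium_space(old_box)
#   assert isinstance(new_box, gym.spaces.Box)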
@DeveloperAPI
def try_import_gymnasium_and_gym():
try:
import gymnasium as gym
except (ImportError, ModuleNotFoundError):
raise ImportError(
"The `gymnasium` package seems to be not installed! As of Ray 2.2, it is "
"required for RLlib. Try running `pip install gymnasium` from the "
"command line to fix this problem."
)
old_gym = None
try:
import gym as old_gym
except (ImportError, ModuleNotFoundError):
pass
return gym, old_gym
|
[
"noreply@github.com"
] |
simon-mo.noreply@github.com
|
16ba6ba79522db1c88ad4d2db458fc5da6f3144b
|
05beed78607892699db199dc28d4e0006df219d5
|
/ssd_pytorch/train.py
|
fdcc71e5e4e0ad9bdcb90b9d568189c54e85dc89
|
[
"MIT"
] |
permissive
|
lugf027/MineSaver
|
3430d8968d1f6ee9c94dc8a096228df4a96745e7
|
564f4137fc462f41d95b8588750c10daed869322
|
refs/heads/master
| 2022-11-27T23:09:33.357302
| 2020-08-06T23:30:14
| 2020-08-06T23:30:14
| 285,674,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,708
|
py
|
from data import *
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from ssd import build_ssd
import os
import sys
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],
type=str, help='VOC or COCO')
parser.add_argument('--dataset_root', default=VOC_ROOT,
help='Dataset root directory path')
parser.add_argument('--basenet', default='vgg16_reducedfc.pth',
help='Pretrained base model')
#parser.add_argument('--batch_size', default=32, type=int,
parser.add_argument('--batch_size', default=1, type=int,
help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
help='Checkpoint state_dict file to resume training from')
parser.add_argument('--start_iter', default=30001, type=int,
help='Resume training at this iter')
parser.add_argument('--num_workers', default=4, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use CUDA to train model')
#parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
# help='initial learning rate')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
help='Gamma update for SGD')
parser.add_argument('--visdom', default=False, type=str2bool,
help='Use visdom for loss visualization')
parser.add_argument('--save_folder', default='weights/',
help='Directory for saving checkpoint models')
args = parser.parse_args()
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't " +
"using CUDA.\nRun with --cuda for optimal training speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
def train():
if args.dataset == 'COCO':
if args.dataset_root == VOC_ROOT:
if not os.path.exists(COCO_ROOT):
parser.error('Must specify dataset_root if specifying dataset')
print("WARNING: Using default COCO dataset_root because " +
"--dataset_root was not specified.")
args.dataset_root = COCO_ROOT
cfg = coco
dataset = COCODetection(root=args.dataset_root,
transform=SSDAugmentation(cfg['min_dim'],
MEANS))
elif args.dataset == 'VOC':
if args.dataset_root == COCO_ROOT:
parser.error('Must specify dataset if specifying dataset_root')
cfg = voc
dataset = VOCDetection(root=args.dataset_root,
transform=SSDAugmentation(cfg['min_dim'],
MEANS))
if args.visdom:
import visdom
global viz
viz = visdom.Visdom()
ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])
net = ssd_net
if args.cuda:
net = torch.nn.DataParallel(ssd_net)
cudnn.benchmark = True
if args.resume:
print('Resuming training, loading {}...'.format(args.resume))
ssd_net.load_weights(args.resume)
else:
vgg_weights = torch.load(args.save_folder + args.basenet)
print('Loading base network...')
ssd_net.vgg.load_state_dict(vgg_weights)
if args.cuda:
net = net.cuda()
if not args.resume:
print('Initializing weights...')
# initialize newly added layers' weights with xavier method
ssd_net.extras.apply(weights_init)
ssd_net.loc.apply(weights_init)
ssd_net.conf.apply(weights_init)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
False, args.cuda)
net.train()
# loss counters
loc_loss = 0
conf_loss = 0
epoch = 0
print('Loading the dataset...')
epoch_size = len(dataset) // args.batch_size
print('Training SSD on:', dataset.name)
print('Using the specified args:')
print(args)
step_index = 0
if args.visdom:
vis_title = 'SSD.PyTorch on ' + dataset.name
vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)
epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)
data_loader = data.DataLoader(dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=True, collate_fn=detection_collate,
pin_memory=True)
# create batch iterator
batch_iterator = iter(data_loader)
for iteration in range(args.start_iter, cfg['max_iter']):
if args.visdom and iteration != 0 and (iteration % epoch_size == 0):
update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,
'append', epoch_size)
# reset epoch loss counters
loc_loss = 0
conf_loss = 0
epoch += 1
if iteration in cfg['lr_steps']:
step_index += 1
adjust_learning_rate(optimizer, args.gamma, step_index)
# load train data
# images, targets = next(batch_iterator)
try:
images, targets = next(batch_iterator)
except StopIteration:
batch_iterator = iter(data_loader)
images, targets = next(batch_iterator)
if args.cuda:
images = Variable(images.cuda())
targets = [Variable(ann.cuda(), volatile=True) for ann in targets]
else:
images = Variable(images)
targets = [Variable(ann, volatile=True) for ann in targets]
# forward
t0 = time.time()
out = net(images)
# backprop
optimizer.zero_grad()
loss_l, loss_c = criterion(out, targets)
loss = loss_l + loss_c
loss.backward()
optimizer.step()
t1 = time.time()
#loc_loss += loss_l.data[0]
#conf_loss += loss_c.data[0]
loc_loss += loss_l.item()
conf_loss += loss_c.item()
if iteration % 10 == 0:
print('timer: %.4f sec.' % (t1 - t0))
#print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]), end=' ')
print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.item()), end=' ')
if args.visdom:
# update_vis_plot(iteration, loss_l.data[0], loss_c.data[0],
update_vis_plot(iteration, loss_l.item(), loss_c.item(),
iter_plot, epoch_plot, 'append')
if iteration != 0 and iteration % 5000 == 0:
print('Saving state, iter:', iteration)
torch.save(ssd_net.state_dict(), 'weights/ssd300_COCO_' +
repr(iteration) + '.pth')
torch.save(ssd_net.state_dict(),
args.save_folder + '' + args.dataset + '.pth')
def adjust_learning_rate(optimizer, gamma, step):
"""Sets the learning rate to the initial LR decayed by 10 at every
specified step
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
lr = args.lr * (gamma ** (step))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
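# Worked example (added comment): with the default --lr 1e-4 and gamma 0.1,
# step_index values 0, 1, 2 yield learning rates 1e-4, 1e-5, and 1e-6.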
def xavier(param):
init.xavier_uniform(param)
def weights_init(m):
if isinstance(m, nn.Conv2d):
xavier(m.weight.data)
m.bias.data.zero_()
def create_vis_plot(_xlabel, _ylabel, _title, _legend):
return viz.line(
X=torch.zeros((1,)).cpu(),
Y=torch.zeros((1, 3)).cpu(),
opts=dict(
xlabel=_xlabel,
ylabel=_ylabel,
title=_title,
legend=_legend
)
)
def update_vis_plot(iteration, loc, conf, window1, window2, update_type,
epoch_size=1):
viz.line(
X=torch.ones((1, 3)).cpu() * iteration,
Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,
win=window1,
update=update_type
)
# initialize epoch plot on first iteration
if iteration == 0:
viz.line(
X=torch.zeros((1, 3)).cpu(),
Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),
win=window2,
update=True
)
if __name__ == '__main__':
train()
|
[
"161230578@qq.com"
] |
161230578@qq.com
|
cb98d277a37629212ee122fcf3ef6d0dd7231947
|
53896f6339d20b806424bfa1a51f6fc518287cfc
|
/app/__init__.py
|
088271f233d139d506f0be527e1bcaa541864b60
|
[] |
no_license
|
Jayin/python-microblog
|
82dcf91dfc253434acf0dafa74724f3b2078bd20
|
3bd09ff79d2fbf8b4a864769ae5050c391fc9739
|
refs/heads/master
| 2020-12-24T14:09:27.391971
| 2014-04-09T13:36:35
| 2014-04-09T13:36:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
#encoding:utf-8
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
from app import views,models
|
[
"tonjayin@gmail.com"
] |
tonjayin@gmail.com
|
fdd81c4ba3c9985c22ee4b986bf4b92364bd87bd
|
8ee717196b3c1b8674de84e3bcfc081661d3dc7d
|
/resync/test/test_resource_list.py
|
0e0c665f590a1f325335331651512beba880691c
|
[] |
no_license
|
semantalytics/resync
|
61ecd04685dc33f2c54f682936e44def4e4a377f
|
ec4ad4b76d6985d8cc955ae24e512d8074460dd7
|
refs/heads/master
| 2021-01-24T02:00:00.097911
| 2013-03-26T16:59:29
| 2013-03-26T16:59:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,636
|
py
|
import unittest
import StringIO
import re
from resync.resource import Resource
from resync.resource_list import ResourceList, ResourceListDupeError
from resync.sitemap import SitemapParseError
class TestResourceList(unittest.TestCase):
def test01_same(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
dst = ResourceList()
dst.add( Resource('a',timestamp=1) )
dst.add( Resource('b',timestamp=2) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 2, "2 things unchanged" )
i = iter(same)
self.assertEqual( i.next().uri, 'a', "first was a" )
self.assertEqual( i.next().uri, 'b', "second was b" )
self.assertEqual( len(changed), 0, "nothing changed" )
self.assertEqual( len(deleted), 0, "nothing deleted" )
self.assertEqual( len(added), 0, "nothing added" )
def test02_changed(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
dst = ResourceList()
dst.add( Resource('a',timestamp=3) )
dst.add( Resource('b',timestamp=4) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 0, "0 things unchanged" )
self.assertEqual( len(changed), 2, "2 things changed" )
i = iter(changed)
self.assertEqual( i.next().uri, 'a', "first was a" )
self.assertEqual( i.next().uri, 'b', "second was b" )
self.assertEqual( len(deleted), 0, "nothing deleted" )
self.assertEqual( len(added), 0, "nothing added" )
def test03_deleted(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
dst = ResourceList()
dst.add( Resource('a',timestamp=1) )
dst.add( Resource('b',timestamp=2) )
dst.add( Resource('c',timestamp=3) )
dst.add( Resource('d',timestamp=4) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 2, "2 things unchanged" )
self.assertEqual( len(changed), 0, "nothing changed" )
self.assertEqual( len(deleted), 2, "c and d deleted" )
i = iter(deleted)
self.assertEqual( i.next().uri, 'c', "first was c" )
self.assertEqual( i.next().uri, 'd', "second was d" )
self.assertEqual( len(added), 0, "nothing added" )
def test04_added(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
src.add( Resource('c',timestamp=3) )
src.add( Resource('d',timestamp=4) )
dst = ResourceList()
dst.add( Resource('a',timestamp=1) )
dst.add( Resource('c',timestamp=3) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 2, "2 things unchanged" )
self.assertEqual( len(changed), 0, "nothing changed" )
self.assertEqual( len(deleted), 0, "nothing deleted" )
self.assertEqual( len(added), 2, "b and d added" )
i = iter(added)
self.assertEqual( i.next().uri, 'b', "first was b" )
self.assertEqual( i.next().uri, 'd', "second was d" )
def test05_add(self):
r1 = Resource(uri='a',length=1)
r2 = Resource(uri='b',length=2)
i = ResourceList()
i.add(r1)
self.assertRaises( ResourceListDupeError, i.add, r1)
i.add(r2)
self.assertRaises( ResourceListDupeError, i.add, r2)
# allow dupes
r1d = Resource(uri='a',length=10)
i.add(r1d,replace=True)
self.assertEqual( len(i), 2 )
self.assertEqual( i.resources['a'].length, 10 )
def test06_add_iterable(self):
r1 = Resource(uri='a',length=1)
r2 = Resource(uri='b',length=2)
i = ResourceList()
i.add( [r1,r2] )
self.assertRaises( ResourceListDupeError, i.add, r1)
self.assertRaises( ResourceListDupeError, i.add, r2)
# allow dupes
r1d = Resource(uri='a',length=10)
i.add( [r1d] ,replace=True)
self.assertEqual( len(i), 2 )
self.assertEqual( i.resources['a'].length, 10 )
def test07_has_md5(self):
r1 = Resource(uri='a')
r2 = Resource(uri='b')
i = ResourceList()
self.assertFalse( i.has_md5() )
i.add(r1)
i.add(r2)
self.assertFalse( i.has_md5() )
r1.md5="aabbcc"
self.assertTrue( i.has_md5() )
def test08_iter(self):
i = ResourceList()
i.add( Resource('a',timestamp=1) )
i.add( Resource('b',timestamp=2) )
i.add( Resource('c',timestamp=3) )
i.add( Resource('d',timestamp=4) )
resources=[]
for r in i:
resources.append(r)
self.assertEqual(len(resources), 4)
self.assertEqual( resources[0].uri, 'a')
self.assertEqual( resources[3].uri, 'd')
def test20_as_xml(self):
rl = ResourceList()
rl.add( Resource('a',timestamp=1) )
rl.add( Resource('b',timestamp=2) )
xml = rl.as_xml()
print xml
self.assertTrue( re.search(r'<rs:md .*capability="resourcelist"', xml), 'XML has capability' )
self.assertTrue( re.search(r'<rs:md .*modified="\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d:\d\dZ"', xml), 'XML has modified to seconds precision (and not more)' )
self.assertTrue( re.search(r'<url><loc>a</loc><lastmod>1970-01-01T00:00:01Z</lastmod></url>', xml), 'XML has resource a' )
def test30_parse(self):
xml='<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n\
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:rs="http://www.openarchives.org/rs/terms/">\
<rs:md capability="resourcelist" modified="2013-01-01"/>\
<url><loc>/tmp/rs_test/src/file_a</loc><lastmod>2012-03-14T18:37:36Z</lastmod><rs:md change="updated" length="12" /></url>\
<url><loc>/tmp/rs_test/src/file_b</loc><lastmod>2012-03-14T18:37:36Z</lastmod><rs:md length="32" /></url>\
</urlset>'
rl=ResourceList()
rl.parse(fh=StringIO.StringIO(xml))
self.assertEqual( len(rl.resources), 2, 'got 2 resources')
self.assertEqual( rl.md['capability'], 'resourcelist', 'capability set' )
self.assertEqual( rl.md['modified'], '2013-01-01' )
def test31_parse_no_capability(self):
xml='<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n\
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\
<url><loc>http://example.com/res1</loc><lastmod>2012-03-14T18:37:36Z</lastmod></url>\
</urlset>'
rl=ResourceList()
rl.parse(fh=StringIO.StringIO(xml))
self.assertEqual( len(rl.resources), 1, 'got 1 resource')
self.assertEqual( rl.md['capability'], 'resourcelist', 'capability set by reading routine' )
self.assertFalse( 'modified' in rl.md )
def test32_parse_bad_capability(self):
# the <rs:md capability="bad_capability".. should give error
xml='<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n\
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:rs="http://www.openarchives.org/rs/terms/">\
<rs:md capability="bad_capability" modified="2013-01-01"/>\
<url><loc>http://example.com/bad_res_1</loc><lastmod>2012-03-14T18:37:36Z</lastmod></url>\
</urlset>'
rl=ResourceList()
self.assertRaises( SitemapParseError, rl.parse, fh=StringIO.StringIO(xml) )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestResourceList)
unittest.TextTestRunner().run(suite)
|
[
"simeon.warner@cornell.edu"
] |
simeon.warner@cornell.edu
|
6c7a891f3da07aaeb08f6dc637af604162da73ee
|
91c946b62413b1092e344c0bc05ab5808ef9deb8
|
/regex_patent_classification.py
|
7e7b0061faae45e64d9f80f08b24f7794450907f
|
[] |
no_license
|
aaag97/PatentAnalysis
|
b540c0fea30e7b18966ee3940bc6f94715663ddc
|
1096861a67c3aa33ed15a6f4e35919ba05a04cba
|
refs/heads/main
| 2023-04-05T11:01:02.110539
| 2021-04-05T16:34:41
| 2021-04-05T16:34:41
| 303,387,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,753
|
py
|
import os
import re
import argparse
from PIL import Image
from ocrutils import OCR_Pages
def has_drawing_from_path(text_path):
"""
function to evaluate whether a patent has a drawing using regex
Args:
text_path - path towards text file
Returns:
boolean indicating whether the patent has a drawing or not according to the regex method
"""
with open(text_path, "r") as text_file:
text_file_str = text_file.read()
return re.search('[Ff][Ii][Gg]\.[0-9]+', text_file_str) or re.search('[Ff][Ii][Gg]\. [0-9]+', text_file_str) or re.search('drawing', text_file_str)
def has_drawing(text_file_str):
"""
function to evaluate whether a patent has a drawing using regex
Args:
text_file_str - the patent text as a string
Returns:
boolean indicating whether the patent has a drawing or not according to the regex method
"""
return re.search('[Ff][Ii][Gg]\.[0-9]+', text_file_str) or re.search('[Ff][Ii][Gg]\. [0-9]+', text_file_str) or re.search('drawing', text_file_str)
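# Hedged examples (added; not in the original file): the patterns above match
# figure references such as 'FIG.3' or 'Fig. 12', plus the literal 'drawing':
#   bool(has_drawing('See FIG. 2 for details.'))     # True
#   bool(has_drawing('No illustrations included.'))  # False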
def get_ordered_images(folder):
"""
function to get all images from a given folder
Args:
folder - the folder which contains the images
Returns:
images - the images in cv2 format
img_names - the filenames of the images
"""
imgs = []
filenames = sorted(os.listdir(folder))
img_names = []
for filename_i in range(len(filenames)):
filename = filenames[filename_i]
img = Image.open(os.path.join(folder,filename))
if img is not None:
imgs.append(img)
img_names.append(filename.split('.')[0])
return imgs, img_names
def parse_args():
"""
function to parse command line arguments
Returns:
args - an argspace object containing the arguments
"""
parser = argparse.ArgumentParser(description='Segment and visualize patent pages.')
parser.add_argument('-i', '--input', type=str, help='the path to a folder containing the images', required=True)
parser.add_argument('-o', '--output', type=str, help='path to the output text file', required=True)
args = parser.parse_args()
return args
def main(pages, output_path):
text = OCR_Pages(pages)
if has_drawing(text):
print('This patent has one or more figures!')
with open(output_path, 'w') as f:
f.write('This patent has one or more figures!')
else:
print('This patent does not have any figures!')
with open(output_path, 'w') as f:
f.write('This patent does not have any figures!')
if __name__ == "__main__":
args = parse_args()
input_path = args.input
output_path = args.output
pages, page_names = get_ordered_images(folder=input_path)
main(pages, output_path=output_path)
|
[
"andre.ghattas@epfl.ch"
] |
andre.ghattas@epfl.ch
|
077791066522a7a2b06a16218258937703b43249
|
0378aa649119767970f4cccbe35afa732346c706
|
/manage.py
|
7ef7772b966f5323343d961a63cfe728363ef583
|
[] |
no_license
|
h4cktivist/job-agency
|
4098041cf318e7740bc0ecae740b1bee195c0771
|
0fa82d42d61deb5e392ccf4856abf88ee8f2f175
|
refs/heads/main
| 2023-05-27T03:03:06.467090
| 2021-06-04T07:50:38
| 2021-06-04T07:50:38
| 366,696,287
| 0
| 0
| null | 2021-06-01T09:05:59
| 2021-05-12T11:44:37
|
HTML
|
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'JobAgency.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"popovvadim0605@gmail.com"
] |
popovvadim0605@gmail.com
|