| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40-40 |
| directory_id | string | lengths 40-40 |
| path | string | lengths 3-281 |
| content_id | string | lengths 40-40 |
| detected_licenses | list | lengths 0-57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 6-116 |
| snapshot_id | string | lengths 40-40 |
| revision_id | string | lengths 40-40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | lengths 2-6.02M |
| authors | list | lengths 1-1 |
| author | string | lengths 0-175 |
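A minimal sketch of consuming rows with this schema, assuming the dump comes from a Hugging Face `datasets` viewer; the dataset id below is a placeholder, not the real one:

```python
# Hypothetical example: stream rows of a Stack-style code dataset and filter
# on the metadata columns above. The dataset id is a placeholder.
from datasets import load_dataset

ds = load_dataset("example-org/example-code-dataset", split="train", streaming=True)

for row in ds:
    # Each row pairs provenance (repo_name, revision_id, path, license fields)
    # with the raw file text in `content`.
    if row["license_type"] == "permissive" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
```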
| field | value |
|---|---|
| blob_id | 1d04d3d4f51fb6e30bcbf047d655a4f3121f14ce |
| directory_id | 73dc6b3fdb07592f10b8e02b7ca053deb61a2dc9 |
| path | /msof_api/comments/admin.py |
| content_id | 4e3be735e10ca856e607a6d04ccf86bad757bf99 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | likelion-kookmin/msof_api |
| snapshot_id | 4143c09f93b68d219aa20de3bd57ec544c2bdf32 |
| revision_id | f9fec7d31ebdb465a8935711da715db6d87c0fce |
| branch_name | refs/heads/develop |
| visit_date | 2023-06-28T15:35:45.240871 |
| revision_date | 2021-07-31T16:38:35 |
| committer_date | 2021-07-31T16:38:35 |
| github_id | 347,298,658 |
| star_events_count | 3 |
| fork_events_count | 1 |
| gha_license_id | null |
| gha_event_created_at | 2021-07-31T16:38:36 |
| gha_created_at | 2021-03-13T07:02:56 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 784 |
| extension | py |
| authors | ["singun11@kookmin.ac.kr"] |
| author | singun11@kookmin.ac.kr |

content:

```python
"""# comments admin
- CommentAdmin
"""
from django.contrib import admin
from .models import Comment
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
"""## CommentAdmin
- admin에서 관리할 Comment 모델 설정
"""
list_display = [
'author',
'question',
'parent',
'selected',
'content',
'status',
'liked_count',
'disliked_count',
]
list_editable = [
'status',
]
list_filter = [
'author',
'question',
'parent',
'selected',
'status',
]
search_fields = [
'content',
'author__name',
'question__title',
'question__content',
]
ordering = [
'-updated_at',
]
```
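The admin options above imply a particular shape for the `Comment` model. A minimal sketch consistent with `list_display` and `search_fields` (field names and related-model paths are inferred for illustration, not taken from the actual msof_api code):

```python
# Hypothetical Comment model inferred from the admin configuration above;
# 'users.User' and 'questions.Question' are assumed app labels.
from django.db import models


class Comment(models.Model):
    author = models.ForeignKey('users.User', on_delete=models.CASCADE)
    question = models.ForeignKey('questions.Question', on_delete=models.CASCADE)
    parent = models.ForeignKey('self', null=True, blank=True, on_delete=models.CASCADE)
    selected = models.BooleanField(default=False)
    content = models.TextField()
    status = models.CharField(max_length=20)
    liked_count = models.PositiveIntegerField(default=0)
    disliked_count = models.PositiveIntegerField(default=0)
    updated_at = models.DateTimeField(auto_now=True)  # drives ordering = ['-updated_at']
```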
| field | value |
|---|---|
| blob_id | e65ee592dfffea41e8966e5f2acf9d9b8c7f9a31 |
| directory_id | ee3ba2af93581aaca5a1393f3eb22fa794be2a12 |
| path | /app/main/form.py |
| content_id | fa863b1c847180707fef5db9f10b6ed3d97e222f |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | wbchief/myflask |
| snapshot_id | 303ed98c969c58a07953aa37c28f90ace3b9a284 |
| revision_id | a4d82bc80df84cb7e418058de3519c29e29db7f1 |
| branch_name | refs/heads/master |
| visit_date | 2020-03-30T23:48:10.771252 |
| revision_date | 2018-10-14T09:56:17 |
| committer_date | 2018-10-14T09:56:17 |
| github_id | 151,713,761 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 2,266 |
| extension | py |
| authors | ["712640388@qq.com"] |
| author | 712640388@qq.com |

content:

```python
from flask_pagedown.fields import PageDownField
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, BooleanField, SelectField
from wtforms.validators import Length, DataRequired, Email, ValidationError
from app.models import User, Role
class EditProfileForm(FlaskForm):
    '''
    Edit-profile form
    '''
name = StringField('真实姓名', validators=[Length(0, 64)])
location = StringField('地址', validators=[Length(0, 64)])
about_me = TextAreaField('自我介绍')
submit = SubmitField('确认')
class EditProfileAdminForm(FlaskForm):
    '''
    Admin edit-profile form
    '''
    email = StringField('邮箱', validators=[DataRequired(), Length(1, 64), Email()])
username = StringField('用户名', validators=[DataRequired(), Length(1, 64)])
confirmed = BooleanField('是否确认')
role = SelectField('Role', coerce=int)
name = StringField('真实姓名', validators=[Length(0, 64)])
location = StringField('地址', validators=[Length(0, 64)])
about_me = TextAreaField('自我介绍')
submit = SubmitField('确认')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        # SelectField requires its options to be provided via the choices attribute
self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
        '''
        Validate the email address
        :param field: email field
        :return:
        '''
if field.data != self.user.email and User.query.filter_by(email=field.data).first():
raise ValidationError('邮箱已经被注册')
def validate_username(self, field):
        '''
        Validate the username
        :param field: username field
        :return:
        '''
if field.data != self.user.username and User.query.filter_by(username=field.data).first():
raise ValidationError('用户名已经存在')
class PostForm(FlaskForm):
body = PageDownField('想写点什么', validators=[DataRequired()])
submit = SubmitField('提交')
class CommentForm(FlaskForm):
body = StringField('', validators=[DataRequired()])
submit = SubmitField('提交')
```
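Two WTForms conventions are doing quiet work here: methods named `validate_<fieldname>` are picked up automatically as inline validators, and `SelectField(coerce=int)` converts the submitted choice back into an integer `Role` id. A sketch of view-side usage under those assumptions (the route, `db` handle, and template name are invented for illustration; only the form class comes from the file above):

```python
# Hypothetical view wiring for EditProfileAdminForm.
@app.route('/edit-profile/<int:user_id>', methods=['GET', 'POST'])
def edit_profile_admin(user_id):
    user = User.query.get_or_404(user_id)
    form = EditProfileAdminForm(user)  # the user argument feeds the uniqueness checks
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.role = Role.query.get(form.role.data)  # coerce=int returned a Role id
        db.session.add(user)
        db.session.commit()
    return render_template('edit_profile.html', form=form, user=user)
```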
| field | value |
|---|---|
| blob_id | e9f868ff815024c1e16fbad4fecdd0555aa18e30 |
| directory_id | 77c5e35467b91bb10967a141f7caa3a3c0b4095f |
| path | /associate_name_with_cat_reduce.py |
| content_id | a29676a9d7f78ae6f5f9930e603d8adcd190b9b8 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | yan9liu/title_search |
| snapshot_id | cab9cd19841dbf895dc165f0172e079129c8650d |
| revision_id | b155c4c3b9a4e306a121e89247f4bad72c3bbe65 |
| branch_name | refs/heads/master |
| visit_date | 2021-01-09T21:52:24.787824 |
| revision_date | 2016-01-04T07:32:40 |
| committer_date | 2016-01-04T07:32:40 |
| github_id | 48,975,494 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 785 |
| extension | py |
| authors | ["liu.ocean@outlook.com"] |
| author | liu.ocean@outlook.com |

content:

```python
import sys


def reduce():
    """Streaming reducer: expects stdin lines already sorted by product id."""
    cats = []
    names = []
    pre_pid = None
    for line in sys.stdin:
        # In Python 3, sys.stdin yields str, so no decode() is needed.
        splits = line.strip().split('\t', 1)
        pid = splits[0]
        if pid != pre_pid:
            # A new product id starts: flush the previous group first.
            if cats and names:
                output(pre_pid, cats, names)
            cats = []
            names = []
            pre_pid = pid
        # Values containing ':' are names; everything else is a category.
        if ':' in splits[1]:
            names.append(splits[1])
        else:
            cats.append(splits[1])
    if cats and names:
        output(pre_pid, cats, names)


def output(pid, cats, names):
    # Emit the cross product of categories and names for one product id.
    for name in names:
        for cat in cats:
            print(pid + "\t" + cat + "\t" + name)


if __name__ == "__main__":
    reduce()
```
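The reducer assumes Hadoop-streaming-style input: lines already sorted by product id, each carrying a tab-separated value that counts as a name when it contains `:` and as a category otherwise. A small smoke test of that contract (sample rows invented for illustration, run in the same module as `reduce()` above):

```python
# Feed the reducer pre-sorted "pid<TAB>value" lines the way Hadoop streaming
# would; the sample rows are made up.
import io
import sys

sample = (
    "p1\tElectronics\n"
    "p1\tname:usb cable\n"
    "p2\tToys\n"
    "p2\tname:lego set\n"
)
sys.stdin = io.StringIO(sample)
reduce()
# expected output:
# p1\tElectronics\tname:usb cable
# p2\tToys\tname:lego set
```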
| field | value |
|---|---|
| blob_id | 6e55abddbe446bbbe2e2f07ae0edd692a27197ed |
| directory_id | b3ac12dfbb8fa74500b406a0907337011d4aac72 |
| path | /goldcoin/full_node/weight_proof.py |
| content_id | c12b097a836dbee13ac9816cccf3f9361015586b |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | chia-os/goldcoin-blockchain |
| snapshot_id | ab62add5396b7734c11d3c37c41776994489d5e7 |
| revision_id | 5c294688dbbe995ae1d4422803f6fcf3e1cc6077 |
| branch_name | refs/heads/main |
| visit_date | 2023-08-11T23:58:53.617051 |
| revision_date | 2021-09-12T15:33:26 |
| committer_date | 2021-09-12T15:33:26 |
| github_id | null |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 67,454 |
| extension | py |

content (the preview cuts off mid-file, before the end of the last function and the authors/author columns):

```python
import asyncio
import dataclasses
import logging
import math
import random
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Tuple
from goldcoin.consensus.block_header_validation import validate_finished_header_block
from goldcoin.consensus.block_record import BlockRecord
from goldcoin.consensus.blockchain_interface import BlockchainInterface
from goldcoin.consensus.constants import ConsensusConstants
from goldcoin.consensus.deficit import calculate_deficit
from goldcoin.consensus.full_block_to_block_record import header_block_to_sub_block_record
from goldcoin.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_iters,
is_overflow_block,
)
from goldcoin.consensus.vdf_info_computation import get_signage_point_vdf_info
from goldcoin.types.blockchain_format.classgroup import ClassgroupElement
from goldcoin.types.blockchain_format.sized_bytes import bytes32
from goldcoin.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot
from goldcoin.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from goldcoin.types.blockchain_format.vdf import VDFInfo
from goldcoin.types.end_of_slot_bundle import EndOfSubSlotBundle
from goldcoin.types.header_block import HeaderBlock
from goldcoin.types.weight_proof import (
SubEpochChallengeSegment,
SubEpochData,
SubSlotData,
WeightProof,
SubEpochSegments,
RecentChainData,
)
from goldcoin.util.block_cache import BlockCache
from goldcoin.util.hash import std_hash
from goldcoin.util.ints import uint8, uint32, uint64, uint128
from goldcoin.util.streamable import dataclass_from_dict, recurse_jsonify
log = logging.getLogger(__name__)
class WeightProofHandler:
LAMBDA_L = 100
C = 0.5
MAX_SAMPLES = 20
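    # How these constants are used below: LAMBDA_L scales the number of random
    # weights drawn in _get_weights_for_sampling, C feeds its estimate of an
    # adversary's success probability, and MAX_SAMPLES caps how many sub epochs
    # are sampled for full challenge segments.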
def __init__(
self,
constants: ConsensusConstants,
blockchain: BlockchainInterface,
):
self.tip: Optional[bytes32] = None
self.proof: Optional[WeightProof] = None
self.constants = constants
self.blockchain = blockchain
self.lock = asyncio.Lock()
async def get_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("unknown tip")
return None
if tip_rec.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
log.debug("chain to short for weight proof")
return None
async with self.lock:
if self.proof is not None:
if self.proof.recent_chain_data[-1].header_hash == tip:
return self.proof
wp = await self._create_proof_of_weight(tip)
if wp is None:
return None
self.proof = wp
self.tip = tip
return wp
def get_sub_epoch_data(self, tip_height: uint32, summary_heights: List[uint32]) -> List[SubEpochData]:
sub_epoch_data: List[SubEpochData] = []
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_height:
break
ses = self.blockchain.get_ses(ses_height)
log.debug(f"handle sub epoch summary {sub_epoch_n} at height: {ses_height} ses {ses}")
sub_epoch_data.append(_create_sub_epoch_data(ses))
return sub_epoch_data
async def _create_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
"""
Creates a weight proof object
"""
assert self.blockchain is not None
sub_epoch_segments: List[SubEpochChallengeSegment] = []
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("failed not tip in cache")
return None
log.info(f"create weight proof peak {tip} {tip_rec.height}")
recent_chain = await self._get_recent_chain(tip_rec.height)
if recent_chain is None:
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
sub_epoch_data = self.get_sub_epoch_data(tip_rec.height, summary_heights)
# use second to last ses as seed
seed = self.get_seed_for_proof(summary_heights, tip_rec.height)
rng = random.Random(seed)
weight_to_check = _get_weights_for_sampling(rng, tip_rec.weight, recent_chain)
sample_n = 0
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_rec.height:
break
            # if we have enough sub epoch samples, don't sample any more
if sample_n >= self.MAX_SAMPLES:
log.debug("reached sampled sub epoch cap")
break
# sample sub epoch
# next sub block
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
if _sample_sub_epoch(prev_ses_block.weight, ses_block.weight, weight_to_check): # type: ignore
sample_n += 1
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(
f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} "
)
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
log.debug(f"sub epoch {sub_epoch_n} has {len(segments)} segments")
sub_epoch_segments.extend(segments)
prev_ses_block = ses_block
log.debug(f"sub_epochs: {len(sub_epoch_data)}")
return WeightProof(sub_epoch_data, sub_epoch_segments, recent_chain)
def get_seed_for_proof(self, summary_heights: List[uint32], tip_height) -> bytes32:
count = 0
ses = None
for sub_epoch_n, ses_height in enumerate(reversed(summary_heights)):
if ses_height <= tip_height:
count += 1
if count == 2:
ses = self.blockchain.get_ses(ses_height)
break
assert ses is not None
seed = ses.get_hash()
return seed
async def _get_recent_chain(self, tip_height: uint32) -> Optional[List[HeaderBlock]]:
recent_chain: List[HeaderBlock] = []
ses_heights = self.blockchain.get_ses_heights()
min_height = 0
count_ses = 0
for ses_height in reversed(ses_heights):
if ses_height <= tip_height:
count_ses += 1
if count_ses == 2:
min_height = ses_height - 1
break
log.debug(f"start {min_height} end {tip_height}")
headers = await self.blockchain.get_header_blocks_in_range(min_height, tip_height, tx_filter=False)
blocks = await self.blockchain.get_block_records_in_range(min_height, tip_height)
ses_count = 0
curr_height = tip_height
blocks_n = 0
while ses_count < 2:
if curr_height == 0:
break
# add to needed reward chain recent blocks
            header_block = headers.get(self.blockchain.height_to_hash(curr_height))
            if header_block is None:
                log.error("creating recent chain failed")
                return None
            block_rec = blocks[header_block.header_hash]
recent_chain.insert(0, header_block)
if block_rec.sub_epoch_summary_included:
ses_count += 1
curr_height = uint32(curr_height - 1) # type: ignore
blocks_n += 1
header_block = headers[self.blockchain.height_to_hash(curr_height)]
recent_chain.insert(0, header_block)
log.info(
f"recent chain, "
f"start: {recent_chain[0].reward_chain_block.height} "
f"end: {recent_chain[-1].reward_chain_block.height} "
)
return recent_chain
async def create_prev_sub_epoch_segments(self):
log.debug("create prev sub_epoch_segments")
heights = self.blockchain.get_ses_heights()
if len(heights) < 3:
return None
count = len(heights) - 2
ses_sub_block = self.blockchain.height_to_block_record(heights[-2])
prev_ses_sub_block = self.blockchain.height_to_block_record(heights[-3])
assert prev_ses_sub_block.sub_epoch_summary_included is not None
segments = await self.__create_sub_epoch_segments(ses_sub_block, prev_ses_sub_block, uint32(count))
assert segments is not None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_sub_block.header_hash, segments)
log.debug("sub_epoch_segments done")
return None
async def create_sub_epoch_segments(self):
log.debug("check segments in db")
"""
Creates a weight proof object
"""
assert self.blockchain is not None
peak_height = self.blockchain.get_peak_height()
if peak_height is None:
log.error("no peak yet")
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
log.debug(f"check db for sub epoch {sub_epoch_n}")
if ses_height > peak_height:
break
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
await self.__create_persist_segment(prev_ses_block, ses_block, ses_height, sub_epoch_n)
prev_ses_block = ses_block
await asyncio.sleep(2)
log.debug("done checking segments")
return None
async def __create_persist_segment(self, prev_ses_block, ses_block, ses_height, sub_epoch_n):
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} ")
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
async def __create_sub_epoch_segments(
self, ses_block: BlockRecord, se_start: BlockRecord, sub_epoch_n: uint32
) -> Optional[List[SubEpochChallengeSegment]]:
segments: List[SubEpochChallengeSegment] = []
start_height = await self.get_prev_two_slots_height(se_start)
blocks = await self.blockchain.get_block_records_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS
)
header_blocks = await self.blockchain.get_header_blocks_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS, tx_filter=False
)
curr: Optional[HeaderBlock] = header_blocks[se_start.header_hash]
height = se_start.height
assert curr is not None
first = True
idx = 0
while curr.height < ses_block.height:
if blocks[curr.header_hash].is_challenge_block(self.constants):
log.debug(f"challenge segment {idx}, starts at {curr.height} ")
seg, height = await self._create_challenge_segment(curr, sub_epoch_n, header_blocks, blocks, first)
if seg is None:
log.error(f"failed creating segment {curr.header_hash} ")
return None
segments.append(seg)
idx += 1
first = False
else:
height = height + uint32(1) # type: ignore
curr = header_blocks[self.blockchain.height_to_hash(height)]
if curr is None:
return None
log.debug(f"next sub epoch starts at {height}")
return segments
async def get_prev_two_slots_height(self, se_start: BlockRecord) -> uint32:
# find prev 2 slots height
slot = 0
batch_size = 50
curr_rec = se_start
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
while slot < 2 and curr_rec.height > 0:
if curr_rec.first_in_sub_slot:
slot += 1
if end - curr_rec.height == batch_size - 1:
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
curr_rec = blocks[self.blockchain.height_to_hash(uint32(curr_rec.height - 1))]
return curr_rec.height
async def _create_challenge_segment(
self,
header_block: HeaderBlock,
sub_epoch_n: uint32,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_segment_in_sub_epoch: bool,
) -> Tuple[Optional[SubEpochChallengeSegment], uint32]:
assert self.blockchain is not None
sub_slots: List[SubSlotData] = []
log.debug(f"create challenge segment block {header_block.header_hash} block height {header_block.height} ")
# VDFs from sub slots before challenge block
first_sub_slots, first_rc_end_of_slot_vdf = await self.__first_sub_slot_vdfs(
header_block, header_blocks, blocks, first_segment_in_sub_epoch
)
if first_sub_slots is None:
log.error("failed building first sub slots")
return None, uint32(0)
sub_slots.extend(first_sub_slots)
ssd = await _challenge_block_vdfs(
self.constants,
header_block,
blocks[header_block.header_hash],
blocks,
)
sub_slots.append(ssd)
        # VDFs from slot after challenge block to end of slot
log.debug(f"create slot end vdf for block {header_block.header_hash} height {header_block.height} ")
challenge_slot_end_sub_slots, end_height = await self.__slot_end_vdf(
uint32(header_block.height + 1), header_blocks, blocks
)
if challenge_slot_end_sub_slots is None:
log.error("failed building slot end ")
return None, uint32(0)
sub_slots.extend(challenge_slot_end_sub_slots)
if first_segment_in_sub_epoch and sub_epoch_n != 0:
return (
SubEpochChallengeSegment(sub_epoch_n, sub_slots, first_rc_end_of_slot_vdf),
end_height,
)
return SubEpochChallengeSegment(sub_epoch_n, sub_slots, None), end_height
# returns a challenge chain vdf from slot start to signage point
async def __first_sub_slot_vdfs(
self,
header_block: HeaderBlock,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_in_sub_epoch: bool,
) -> Tuple[Optional[List[SubSlotData]], Optional[VDFInfo]]:
# combine cc vdfs of all reward blocks from the start of the sub slot to end
header_block_sub_rec = blocks[header_block.header_hash]
# find slot start
curr_sub_rec = header_block_sub_rec
first_rc_end_of_slot_vdf = None
if first_in_sub_epoch and curr_sub_rec.height > 0:
while not curr_sub_rec.sub_epoch_summary_included:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
first_rc_end_of_slot_vdf = self.first_rc_end_of_slot_vdf(header_block, blocks, header_blocks)
else:
if header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot:
sub_slots_num = 2
while sub_slots_num > 0 and curr_sub_rec.height > 0:
if curr_sub_rec.first_in_sub_slot:
assert curr_sub_rec.finished_challenge_slot_hashes is not None
sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes)
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
else:
while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
curr = header_blocks[curr_sub_rec.header_hash]
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while curr.height < header_block.height:
if curr is None:
log.error("failed fetching block")
return None, None
if curr.first_in_sub_slot:
# if not blue boxed
if not blue_boxed_end_of_slot(curr.finished_sub_slots[0]):
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(curr.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
tmp_sub_slots_data = []
ssd = SubSlotData(
None,
None,
None,
None,
None,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
curr.reward_chain_block.infused_challenge_chain_ip_vdf,
curr.total_iters,
)
tmp_sub_slots_data.append(ssd)
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(header_block.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
return sub_slots_data, first_rc_end_of_slot_vdf
def first_rc_end_of_slot_vdf(
self,
header_block,
blocks: Dict[bytes32, BlockRecord],
header_blocks: Dict[bytes32, HeaderBlock],
) -> Optional[VDFInfo]:
curr = blocks[header_block.header_hash]
while curr.height > 0 and not curr.sub_epoch_summary_included:
curr = blocks[curr.prev_hash]
return header_blocks[curr.header_hash].finished_sub_slots[-1].reward_chain.end_of_slot_vdf
async def __slot_end_vdf(
self, start_height: uint32, header_blocks: Dict[bytes32, HeaderBlock], blocks: Dict[bytes32, BlockRecord]
) -> Tuple[Optional[List[SubSlotData]], uint32]:
# gets all vdfs first sub slot after challenge block to last sub slot
log.debug(f"slot end vdf start height {start_height}")
curr = header_blocks[self.blockchain.height_to_hash(start_height)]
curr_header_hash = curr.header_hash
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while not blocks[curr_header_hash].is_challenge_block(self.constants):
if curr.first_in_sub_slot:
sub_slots_data.extend(tmp_sub_slots_data)
curr_prev_header_hash = curr.prev_header_hash
# add collected vdfs
for idx, sub_slot in enumerate(curr.finished_sub_slots):
prev_rec = blocks[curr_prev_header_hash]
eos_vdf_iters = prev_rec.sub_slot_iters
if idx == 0:
eos_vdf_iters = uint64(prev_rec.sub_slot_iters - prev_rec.ip_iters(self.constants))
sub_slots_data.append(handle_end_of_slot(sub_slot, eos_vdf_iters))
tmp_sub_slots_data = []
tmp_sub_slots_data.append(self.handle_block_vdfs(curr, blocks))
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
curr_header_hash = curr.header_hash
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
log.debug(f"slot end vdf end height {curr.height} slots {len(sub_slots_data)} ")
return sub_slots_data, curr.height
def handle_block_vdfs(self, curr: HeaderBlock, blocks: Dict[bytes32, BlockRecord]):
cc_sp_proof = None
icc_ip_proof = None
cc_sp_info = None
icc_ip_info = None
block_record = blocks[curr.header_hash]
if curr.infused_challenge_chain_ip_proof is not None:
assert curr.reward_chain_block.infused_challenge_chain_ip_vdf
icc_ip_proof = curr.infused_challenge_chain_ip_proof
icc_ip_info = curr.reward_chain_block.infused_challenge_chain_ip_vdf
if curr.challenge_chain_sp_proof is not None:
assert curr.reward_chain_block.challenge_chain_sp_vdf
cc_sp_vdf_info = curr.reward_chain_block.challenge_chain_sp_vdf
if not curr.challenge_chain_sp_proof.normalized_to_identity:
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
self.constants,
curr.finished_sub_slots,
block_record.overflow,
None if curr.height == 0 else blocks[curr.prev_header_hash],
BlockCache(blocks),
block_record.sp_total_iters(self.constants),
block_record.sp_iters(self.constants),
)
cc_sp_vdf_info = VDFInfo(
curr.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
curr.reward_chain_block.challenge_chain_sp_vdf.output,
)
cc_sp_proof = curr.challenge_chain_sp_proof
cc_sp_info = cc_sp_vdf_info
return SubSlotData(
None,
cc_sp_proof,
curr.challenge_chain_ip_proof,
icc_ip_proof,
cc_sp_info,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
icc_ip_info,
curr.total_iters,
)
def validate_weight_proof_single_proc(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed sub epoch data validation")
return False, uint32(0)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
log.info("validate sub epoch challenge segments")
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0)
if not _validate_sub_epoch_segments(constants, rng, wp_segment_bytes, summary_bytes):
return False, uint32(0)
log.info("validate weight proof recent blocks")
if not _validate_recent_blocks(constants, wp_recent_chain_bytes, summary_bytes):
return False, uint32(0)
return True, self.get_fork_point(summaries)
def get_fork_point_no_validations(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
log.debug("get fork point skip validations")
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed to validate sub epoch summaries")
return False, uint32(0)
return True, self.get_fork_point(summaries)
async def validate_weight_proof(self, weight_proof: WeightProof) -> Tuple[bool, uint32, List[SubEpochSummary]]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0), []
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.error("weight proof failed sub epoch data validation")
return False, uint32(0), []
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0), []
executor = ProcessPoolExecutor(1)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
segment_validation_task = asyncio.get_running_loop().run_in_executor(
executor, _validate_sub_epoch_segments, constants, rng, wp_segment_bytes, summary_bytes
)
recent_blocks_validation_task = asyncio.get_running_loop().run_in_executor(
executor, _validate_recent_blocks, constants, wp_recent_chain_bytes, summary_bytes
)
valid_segment_task = segment_validation_task
valid_recent_blocks_task = recent_blocks_validation_task
valid_recent_blocks = await valid_recent_blocks_task
if not valid_recent_blocks:
log.error("failed validating weight proof recent blocks")
return False, uint32(0), []
valid_segments = await valid_segment_task
if not valid_segments:
log.error("failed validating weight proof sub epoch segments")
return False, uint32(0), []
return True, self.get_fork_point(summaries), summaries
def get_fork_point(self, received_summaries: List[SubEpochSummary]) -> uint32:
# iterate through sub epoch summaries to find fork point
fork_point_index = 0
ses_heights = self.blockchain.get_ses_heights()
for idx, summary_height in enumerate(ses_heights):
log.debug(f"check summary {idx} height {summary_height}")
local_ses = self.blockchain.get_ses(summary_height)
if idx == len(received_summaries) - 1:
# end of wp summaries, local chain is longer or equal to wp chain
break
if local_ses is None or local_ses.get_hash() != received_summaries[idx].get_hash():
break
fork_point_index = idx
if fork_point_index > 2:
            # Two summaries can have different blocks and still be identical
# This gets resolved after one full sub epoch
height = ses_heights[fork_point_index - 2]
else:
height = uint32(0)
return height
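# A note on the sampling below: delta is the fraction of the total weight held
# by the recent chain, and each sample maps a uniform u in [0, 1) to the
# cumulative weight (1 - delta**u) * total_weight, a distribution that biases
# samples toward heavier, more recent sub epochs.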
def _get_weights_for_sampling(
rng: random.Random, total_weight: uint128, recent_chain: List[HeaderBlock]
) -> Optional[List[uint128]]:
weight_to_check = []
last_l_weight = recent_chain[-1].reward_chain_block.weight - recent_chain[0].reward_chain_block.weight
delta = last_l_weight / total_weight
prob_of_adv_succeeding = 1 - math.log(WeightProofHandler.C, delta)
if prob_of_adv_succeeding <= 0:
return None
queries = -WeightProofHandler.LAMBDA_L * math.log(2, prob_of_adv_succeeding)
for i in range(int(queries) + 1):
u = rng.random()
q = 1 - delta ** u
# todo check division and type conversions
weight = q * float(total_weight)
weight_to_check.append(uint128(int(weight)))
weight_to_check.sort()
return weight_to_check
def _sample_sub_epoch(
start_of_epoch_weight: uint128,
end_of_epoch_weight: uint128,
weight_to_check: List[uint128],
) -> bool:
"""
weight_to_check: List[uint128] is expected to be sorted
"""
if weight_to_check is None:
return True
if weight_to_check[-1] < start_of_epoch_weight:
return False
if weight_to_check[0] > end_of_epoch_weight:
return False
choose = False
for weight in weight_to_check:
if weight > end_of_epoch_weight:
return False
if start_of_epoch_weight < weight < end_of_epoch_weight:
log.debug(f"start weight: {start_of_epoch_weight}")
log.debug(f"weight to check {weight}")
log.debug(f"end weight: {end_of_epoch_weight}")
choose = True
break
return choose
# wp creation methods
def _create_sub_epoch_data(
sub_epoch_summary: SubEpochSummary,
) -> SubEpochData:
reward_chain_hash: bytes32 = sub_epoch_summary.reward_chain_hash
    # Number of sub-blocks that overflow in the previous slot
previous_sub_epoch_overflows: uint8 = sub_epoch_summary.num_blocks_overflow # total in sub epoch - expected
# New work difficulty and iterations per sub-slot
sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
return SubEpochData(reward_chain_hash, previous_sub_epoch_overflows, sub_slot_iters, new_difficulty)
async def _challenge_block_vdfs(
constants: ConsensusConstants,
header_block: HeaderBlock,
block_rec: BlockRecord,
sub_blocks: Dict[bytes32, BlockRecord],
):
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
constants,
header_block.finished_sub_slots,
block_rec.overflow,
None if header_block.height == 0 else sub_blocks[header_block.prev_header_hash],
BlockCache(sub_blocks),
block_rec.sp_total_iters(constants),
block_rec.sp_iters(constants),
)
cc_sp_info = None
if header_block.reward_chain_block.challenge_chain_sp_vdf:
cc_sp_info = header_block.reward_chain_block.challenge_chain_sp_vdf
assert header_block.challenge_chain_sp_proof
if not header_block.challenge_chain_sp_proof.normalized_to_identity:
cc_sp_info = VDFInfo(
header_block.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
header_block.reward_chain_block.challenge_chain_sp_vdf.output,
)
ssd = SubSlotData(
header_block.reward_chain_block.proof_of_space,
header_block.challenge_chain_sp_proof,
header_block.challenge_chain_ip_proof,
None,
cc_sp_info,
header_block.reward_chain_block.signage_point_index,
None,
None,
None,
None,
header_block.reward_chain_block.challenge_chain_ip_vdf,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
block_rec.total_iters,
)
return ssd
def handle_finished_slots(end_of_slot: EndOfSubSlotBundle, icc_end_of_slot_info):
return SubSlotData(
None,
None,
None,
None,
None,
None,
None
if end_of_slot.proofs.challenge_chain_slot_proof is None
else end_of_slot.proofs.challenge_chain_slot_proof,
None
if end_of_slot.proofs.infused_challenge_chain_slot_proof is None
else end_of_slot.proofs.infused_challenge_chain_slot_proof,
end_of_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
icc_end_of_slot_info,
None,
None,
None,
)
def handle_end_of_slot(
sub_slot: EndOfSubSlotBundle,
eos_vdf_iters: uint64,
):
assert sub_slot.infused_challenge_chain
assert sub_slot.proofs.infused_challenge_chain_slot_proof
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
else:
icc_info = VDFInfo(
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
cc_info = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf
else:
cc_info = VDFInfo(
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
return SubSlotData(
None,
None,
None,
None,
None,
None,
sub_slot.proofs.challenge_chain_slot_proof,
sub_slot.proofs.infused_challenge_chain_slot_proof,
cc_info,
icc_info,
None,
None,
None,
)
def compress_segments(full_segment_index, segments: List[SubEpochChallengeSegment]) -> List[SubEpochChallengeSegment]:
compressed_segments = []
compressed_segments.append(segments[0])
for idx, segment in enumerate(segments[1:]):
if idx != full_segment_index:
# remove all redundant values
segment = compress_segment(segment)
compressed_segments.append(segment)
return compressed_segments
def compress_segment(segment: SubEpochChallengeSegment) -> SubEpochChallengeSegment:
# find challenge slot
comp_seg = SubEpochChallengeSegment(segment.sub_epoch_n, [], segment.rc_slot_end_info)
for slot in segment.sub_slots:
comp_seg.sub_slots.append(slot)
if slot.is_challenge():
break
    return comp_seg
# wp validation methods
def _validate_sub_epoch_summaries(
constants: ConsensusConstants,
weight_proof: WeightProof,
) -> Tuple[Optional[List[SubEpochSummary]], Optional[List[uint128]]]:
last_ses_hash, last_ses_sub_height = _get_last_ses_hash(constants, weight_proof.recent_chain_data)
if last_ses_hash is None:
log.warning("could not find last ses block")
return None, None
summaries, total, sub_epoch_weight_list = _map_sub_epoch_summaries(
constants.SUB_EPOCH_BLOCKS,
constants.GENESIS_CHALLENGE,
weight_proof.sub_epochs,
constants.DIFFICULTY_STARTING,
)
log.info(f"validating {len(summaries)} sub epochs")
# validate weight
if not _validate_summaries_weight(constants, total, summaries, weight_proof):
log.error("failed validating weight")
return None, None
last_ses = summaries[-1]
log.debug(f"last ses sub height {last_ses_sub_height}")
# validate last ses_hash
if last_ses.get_hash() != last_ses_hash:
log.error(f"failed to validate ses hashes block height {last_ses_sub_height}")
return None, None
return summaries, sub_epoch_weight_list
def _map_sub_epoch_summaries(
sub_blocks_for_se: uint32,
ses_hash: bytes32,
sub_epoch_data: List[SubEpochData],
curr_difficulty: uint64,
) -> Tuple[List[SubEpochSummary], uint128, List[uint128]]:
total_weight: uint128 = uint128(0)
summaries: List[SubEpochSummary] = []
sub_epoch_weight_list: List[uint128] = []
for idx, data in enumerate(sub_epoch_data):
ses = SubEpochSummary(
ses_hash,
data.reward_chain_hash,
data.num_blocks_overflow,
data.new_difficulty,
data.new_sub_slot_iters,
)
if idx < len(sub_epoch_data) - 1:
delta = 0
if idx > 0:
delta = sub_epoch_data[idx].num_blocks_overflow
log.debug(f"sub epoch {idx} start weight is {total_weight+curr_difficulty} ")
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
total_weight = total_weight + uint128( # type: ignore
curr_difficulty * (sub_blocks_for_se + sub_epoch_data[idx + 1].num_blocks_overflow - delta)
)
# if new epoch update diff and iters
if data.new_difficulty is not None:
curr_difficulty = data.new_difficulty
        # add to the summaries list
summaries.append(ses)
ses_hash = std_hash(ses)
# add last sub epoch weight
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
return summaries, total_weight, sub_epoch_weight_list
def _validate_summaries_weight(constants: ConsensusConstants, sub_epoch_data_weight, summaries, weight_proof) -> bool:
num_over = summaries[-1].num_blocks_overflow
ses_end_height = (len(summaries) - 1) * constants.SUB_EPOCH_BLOCKS + num_over - 1
curr = None
for block in weight_proof.recent_chain_data:
if block.reward_chain_block.height == ses_end_height:
curr = block
if curr is None:
return False
return curr.reward_chain_block.weight == sub_epoch_data_weight
def _validate_sub_epoch_segments(
constants_dict: Dict,
rng: random.Random,
weight_proof_bytes: bytes,
summaries_bytes: List[bytes],
):
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
sub_epoch_segments: SubEpochSegments = SubEpochSegments.from_bytes(weight_proof_bytes)
rc_sub_slot_hash = constants.GENESIS_CHALLENGE
    total_blocks, total_ip_iters = 0, 0
    total_slot_iters, total_slots = 0, 0
prev_ses: Optional[SubEpochSummary] = None
segments_by_sub_epoch = map_segments_by_sub_epoch(sub_epoch_segments.challenge_segments)
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for sub_epoch_n, segments in segments_by_sub_epoch.items():
prev_ssi = curr_ssi
curr_difficulty, curr_ssi = _get_curr_diff_ssi(constants, sub_epoch_n, summaries)
log.debug(f"validate sub epoch {sub_epoch_n}")
# recreate RewardChainSubSlot for next ses rc_hash
sampled_seg_index = rng.choice(range(len(segments)))
if sub_epoch_n > 0:
rc_sub_slot = __get_rc_sub_slot(constants, segments[0], summaries, curr_ssi)
prev_ses = summaries[sub_epoch_n - 1]
rc_sub_slot_hash = rc_sub_slot.get_hash()
if not summaries[sub_epoch_n].reward_chain_hash == rc_sub_slot_hash:
log.error(f"failed reward_chain_hash validation sub_epoch {sub_epoch_n}")
return False
for idx, segment in enumerate(segments):
valid_segment, ip_iters, slot_iters, slots = _validate_segment(
constants, segment, curr_ssi, prev_ssi, curr_difficulty, prev_ses, idx == 0, sampled_seg_index == idx
)
if not valid_segment:
log.error(f"failed to validate sub_epoch {segment.sub_epoch_n} segment {idx} slots")
return False
prev_ses = None
total_blocks += 1
total_slot_iters += slot_iters
total_slots += slots
total_ip_iters += ip_iters
return True
def _validate_segment(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
curr_ssi: uint64,
prev_ssi: uint64,
curr_difficulty: uint64,
ses: Optional[SubEpochSummary],
first_segment_in_se: bool,
sampled: bool,
) -> Tuple[bool, int, int, int]:
ip_iters, slot_iters, slots = 0, 0, 0
after_challenge = False
for idx, sub_slot_data in enumerate(segment.sub_slots):
if sampled and sub_slot_data.is_challenge():
after_challenge = True
required_iters = __validate_pospace(constants, segment, idx, curr_difficulty, ses, first_segment_in_se)
if required_iters is None:
return False, uint64(0), uint64(0), uint64(0)
assert sub_slot_data.signage_point_index is not None
ip_iters = ip_iters + calculate_ip_iters( # type: ignore
constants, curr_ssi, sub_slot_data.signage_point_index, required_iters
)
if not _validate_challenge_block_vdfs(constants, idx, segment.sub_slots, curr_ssi):
log.error(f"failed to validate challenge slot {idx} vdfs")
return False, uint64(0), uint64(0), uint64(0)
elif sampled and after_challenge:
if not _validate_sub_slot_data(constants, idx, segment.sub_slots, curr_ssi):
log.error(f"failed to validate sub slot data {idx} vdfs")
return False, uint64(0), uint64(0), uint64(0)
slot_iters = slot_iters + curr_ssi # type: ignore
slots = slots + uint64(1) # type: ignore
return True, ip_iters, slot_iters, slots
def _validate_challenge_block_vdfs(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> bool:
sub_slot_data = sub_slots[sub_slot_idx]
if sub_slot_data.cc_signage_point is not None and sub_slot_data.cc_sp_vdf_info:
assert sub_slot_data.signage_point_index
sp_input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity and sub_slot_idx >= 1:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
prev_ssd = sub_slots[sub_slot_idx - 1]
sp_input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
if not sub_slot_data.cc_signage_point.is_valid(constants, sp_input, sub_slot_data.cc_sp_vdf_info):
log.error(f"failed to validate challenge chain signage point 2 {sub_slot_data.cc_sp_vdf_info}")
return False
assert sub_slot_data.cc_infusion_point
assert sub_slot_data.cc_ip_vdf_info
ip_input = ClassgroupElement.get_default_element()
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and sub_slot_idx >= 1:
prev_ssd = sub_slots[sub_slot_idx - 1]
if prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
assert prev_ssd.total_iters
assert sub_slot_data.total_iters
ip_input = prev_ssd.cc_ip_vdf_info.output
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
if not sub_slot_data.cc_infusion_point.is_valid(constants, ip_input, cc_ip_vdf_info):
log.error(f"failed to validate challenge chain infusion point {sub_slot_data.cc_ip_vdf_info}")
return False
return True
def _validate_sub_slot_data(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> bool:
sub_slot_data = sub_slots[sub_slot_idx]
assert sub_slot_idx > 0
prev_ssd = sub_slots[sub_slot_idx - 1]
if sub_slot_data.is_end_of_slot():
if sub_slot_data.icc_slot_end is not None:
input = ClassgroupElement.get_default_element()
if not sub_slot_data.icc_slot_end.normalized_to_identity and prev_ssd.icc_ip_vdf_info is not None:
assert prev_ssd.icc_ip_vdf_info
input = prev_ssd.icc_ip_vdf_info.output
assert sub_slot_data.icc_slot_end_info
if not sub_slot_data.icc_slot_end.is_valid(constants, input, sub_slot_data.icc_slot_end_info, None):
log.error(f"failed icc slot end validation {sub_slot_data.icc_slot_end_info} ")
return False
assert sub_slot_data.cc_slot_end_info
assert sub_slot_data.cc_slot_end
input = ClassgroupElement.get_default_element()
if (not prev_ssd.is_end_of_slot()) and (not sub_slot_data.cc_slot_end.normalized_to_identity):
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
if not sub_slot_data.cc_slot_end.is_valid(constants, input, sub_slot_data.cc_slot_end_info):
log.error(f"failed cc slot end validation {sub_slot_data.cc_slot_end_info}")
return False
else:
# find end of slot
idx = sub_slot_idx
while idx < len(sub_slots) - 1:
curr_slot = sub_slots[idx]
if curr_slot.is_end_of_slot():
                # don't validate intermediate vdfs if slot is blue boxed
assert curr_slot.cc_slot_end
if curr_slot.cc_slot_end.normalized_to_identity is True:
log.debug(f"skip intermediate vdfs slot {sub_slot_idx}")
return True
else:
break
idx += 1
if sub_slot_data.icc_infusion_point is not None and sub_slot_data.icc_ip_vdf_info is not None:
input = ClassgroupElement.get_default_element()
if not prev_ssd.is_challenge() and prev_ssd.icc_ip_vdf_info is not None:
input = prev_ssd.icc_ip_vdf_info.output
if not sub_slot_data.icc_infusion_point.is_valid(constants, input, sub_slot_data.icc_ip_vdf_info, None):
log.error(f"failed icc infusion point vdf validation {sub_slot_data.icc_slot_end_info} ")
return False
assert sub_slot_data.signage_point_index is not None
if sub_slot_data.cc_signage_point:
assert sub_slot_data.cc_sp_vdf_info
input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
if not sub_slot_data.cc_signage_point.is_valid(constants, input, sub_slot_data.cc_sp_vdf_info):
log.error(f"failed cc signage point vdf validation {sub_slot_data.cc_sp_vdf_info}")
return False
input = ClassgroupElement.get_default_element()
assert sub_slot_data.cc_ip_vdf_info
assert sub_slot_data.cc_infusion_point
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
assert sub_slot_data.total_iters
assert prev_ssd.total_iters
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
if not sub_slot_data.cc_infusion_point.is_valid(constants, input, cc_ip_vdf_info):
log.error(f"failed cc infusion point vdf validation {sub_slot_data.cc_slot_end_info}")
return False
return True
def sub_slot_data_vdf_input(
constants: ConsensusConstants,
sub_slot_data: SubSlotData,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
is_overflow: bool,
new_sub_slot: bool,
ssi: uint64,
) -> ClassgroupElement:
cc_input = ClassgroupElement.get_default_element()
sp_total_iters = get_sp_total_iters(constants, is_overflow, ssi, sub_slot_data)
ssd: Optional[SubSlotData] = None
if is_overflow and new_sub_slot:
if sub_slot_idx >= 2:
if sub_slots[sub_slot_idx - 2].cc_slot_end_info is None:
for ssd_idx in reversed(range(0, sub_slot_idx - 1)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
if ssd and ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not is_overflow and not new_sub_slot:
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not new_sub_slot and is_overflow:
slots_seen = 0
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
slots_seen += 1
if slots_seen == 2:
return ClassgroupElement.get_default_element()
if ssd.cc_slot_end_info is None and not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
def _validate_recent_blocks(constants_dict: Dict, recent_chain_bytes: bytes, summaries_bytes: List[bytes]) -> bool:
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
recent_chain: RecentChainData = RecentChainData.from_bytes(recent_chain_bytes)
sub_blocks = BlockCache({})
first_ses_idx = _get_ses_idx(recent_chain.recent_chain_data)
ses_idx = len(summaries) - len(first_ses_idx)
ssi: uint64 = constants.SUB_SLOT_ITERS_STARTING
diff: Optional[uint64] = constants.DIFFICULTY_STARTING
last_blocks_to_validate = 100 # todo remove cap after benchmarks
for summary in summaries[:ses_idx]:
if summary.new_sub_slot_iters is not None:
ssi = summary.new_sub_slot_iters
if summary.new_difficulty is not None:
diff = summary.new_difficulty
ses_blocks, sub_slots, transaction_blocks = 0, 0, 0
challenge, prev_challenge = None, None
tip_height = recent_chain.recent_chain_data[-1].height
prev_block_record = None
deficit = uint8(0)
for idx, block in enumerate(recent_chain.recent_chain_data):
required_iters = uint64(0)
overflow = False
ses = False
height = block.height
for sub_slot in block.finished_sub_slots:
prev_challenge = challenge
challenge = sub_slot.challenge_chain.get_hash()
deficit = sub_slot.reward_chain.deficit
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
ses = True
assert summaries[ses_idx].get_hash() == sub_slot.challenge_chain.subepoch_summary_hash
ses_idx += 1
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
ssi = sub_slot.challenge_chain.new_sub_slot_iters
if sub_slot.challenge_chain.new_difficulty is not None:
diff = sub_slot.challenge_chain.new_difficulty
if (challenge is not None) and (prev_challenge is not None):
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
deficit = get_deficit(constants, deficit, prev_block_record, overflow, len(block.finished_sub_slots))
log.debug(f"wp, validate block {block.height}")
if sub_slots > 2 and transaction_blocks > 11 and (tip_height - block.height < last_blocks_to_validate):
required_iters, error = validate_finished_header_block(
constants, sub_blocks, block, False, diff, ssi, ses_blocks > 2
)
if error is not None:
log.error(f"block {block.header_hash} failed validation {error}")
return False
else:
required_iters = _validate_pospace_recent_chain(
constants, block, challenge, diff, overflow, prev_challenge
)
if required_iters is None:
return False
curr_block_ses = None if not ses else summaries[ses_idx - 1]
block_record = header_block_to_sub_block_record(
constants, required_iters, block, ssi, overflow, deficit, height, curr_block_ses
)
log.debug(f"add block {block_record.height} to tmp sub blocks")
sub_blocks.add_block_record(block_record)
if block.first_in_sub_slot:
sub_slots += 1
if block.is_transaction_block:
transaction_blocks += 1
if ses:
ses_blocks += 1
prev_block_record = block_record
return True
def _validate_pospace_recent_chain(
constants: ConsensusConstants,
block: HeaderBlock,
challenge: bytes32,
diff: uint64,
overflow: bool,
prev_challenge: bytes32,
):
if block.reward_chain_block.challenge_chain_sp_vdf is None:
# Edge case of first sp (start of slot), where sp_iters == 0
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
assert cc_sp_hash is not None
q_str = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants,
challenge if not overflow else prev_challenge,
cc_sp_hash,
)
if q_str is None:
log.error(f"could not verify proof of space block {block.height} {overflow}")
return None
required_iters = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
block.reward_chain_block.proof_of_space.size,
diff,
cc_sp_hash,
)
return required_iters
def __validate_pospace(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
idx: int,
curr_diff: uint64,
ses: Optional[SubEpochSummary],
first_in_sub_epoch: bool,
) -> Optional[uint64]:
if first_in_sub_epoch and segment.sub_epoch_n == 0 and idx == 0:
cc_sub_slot_hash = constants.GENESIS_CHALLENGE
else:
cc_sub_slot_hash = __get_cc_sub_slot(segment.sub_slots, idx, ses).get_hash()
sub_slot_data: SubSlotData = segment.sub_slots[idx]
if sub_slot_data.signage_point_index and is_overflow_block(constants, sub_slot_data.signage_point_index):
curr_slot = segment.sub_slots[idx - 1]
assert curr_slot.cc_slot_end_info
challenge = curr_slot.cc_slot_end_info.challenge
else:
challenge = cc_sub_slot_hash
if sub_slot_data.cc_sp_vdf_info is None:
cc_sp_hash = cc_sub_slot_hash
else:
cc_sp_hash = sub_slot_data.cc_sp_vdf_info.output.get_hash()
# validate proof of space
assert sub_slot_data.proof_of_space is not None
q_str = sub_slot_data.proof_of_space.verify_and_get_quality_string(
constants,
challenge,
cc_sp_hash,
)
if q_str is None:
log.error("could not verify proof of space")
return None
return calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
sub_slot_data.proof_of_space.size,
curr_diff,
cc_sp_hash,
)
def __get_rc_sub_slot(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
summaries: List[SubEpochSummary],
curr_ssi: uint64,
) -> RewardChainSubSlot:
ses = summaries[uint32(segment.sub_epoch_n - 1)]
# find first challenge in sub epoch
first_idx = None
first = None
for idx, curr in enumerate(segment.sub_slots):
if curr.cc_slot_end is None:
first_idx = idx
first = curr
break
assert first_idx
idx = first_idx
slots = segment.sub_slots
# number of slots to look for
slots_n = 1
assert first
assert first.signage_point_index is not None
if is_overflow_block(constants, first.signage_point_index):
if idx >= 2 and slots[idx - 2].cc_slot_end is None:
slots_n = 2
new_diff = None if ses is None else ses.new_difficulty
new_ssi = None if ses is None else ses.new_sub_slot_iters
ses_hash = None if ses is None else ses.get_hash()
overflow = is_overflow_block(constants, first.signage_point_index)
if overflow:
if idx >= 2 and slots[idx - 2].cc_slot_end is not None and slots[idx - 1].cc_slot_end is not None:
ses_hash = None
new_ssi = None
new_diff = None
sub_slot = slots[idx]
while True:
if sub_slot.cc_slot_end:
slots_n -= 1
if slots_n == 0:
break
idx -= 1
sub_slot = slots[idx]
icc_sub_slot_hash: Optional[bytes32] = None
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
assert segment.rc_slot_end_info is not None
if idx != 0:
cc_vdf_info = VDFInfo(sub_slot.cc_slot_end_info.challenge, curr_ssi, sub_slot.cc_slot_end_info.output)
if sub_slot.icc_slot_end_info is not None:
icc_slot_end_info = VDFInfo(
sub_slot.icc_slot_end_info.challenge, curr_ssi, sub_slot.icc_slot_end_info.output
)
icc_sub_slot_hash = icc_slot_end_info.get_hash()
else:
cc_vdf_info = sub_slot.cc_slot_end_info
if sub_slot.icc_slot_end_info is not None:
icc_sub_slot_hash = sub_slot.icc_slot_end_info.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
cc_vdf_info,
icc_sub_slot_hash,
ses_hash,
new_ssi,
new_diff,
)
rc_sub_slot = RewardChainSubSlot(
segment.rc_slot_end_info,
cc_sub_slot.get_hash(),
icc_sub_slot_hash,
constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
)
return rc_sub_slot
def __get_cc_sub_slot(sub_slots: List[SubSlotData], idx, ses: Optional[SubEpochSummary]) -> ChallengeChainSubSlot:
sub_slot: Optional[SubSlotData] = None
for i in reversed(range(0, idx)):
sub_slot = sub_slots[i]
if sub_slot.cc_slot_end_info is not None:
break
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
icc_vdf = sub_slot.icc_slot_end_info
icc_vdf_hash: Optional[bytes32] = None
if icc_vdf is not None:
icc_vdf_hash = icc_vdf.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
sub_slot.cc_slot_end_info,
icc_vdf_hash,
None if ses is None else ses.get_hash(),
None if ses is None else ses.new_sub_slot_iters,
None if ses is None else ses.new_difficulty,
)
return cc_sub_slot
def _get_curr_diff_ssi(constants: ConsensusConstants, idx, summaries):
curr_difficulty = constants.DIFFICULTY_STARTING
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for ses in reversed(summaries[0:idx]):
if ses.new_sub_slot_iters is not None:
curr_ssi = ses.new_sub_slot_iters
curr_difficulty = ses.new_difficulty
break
return curr_difficulty, curr_ssi
def vars_to_bytes(constants, summaries, weight_proof):
constants_dict = recurse_jsonify(dataclasses.asdict(constants))
wp_recent_chain_bytes = bytes(RecentChainData(weight_proof.recent_chain_data))
wp_segment_bytes = bytes(SubEpochSegments(weight_proof.sub_epoch_segments))
summary_bytes = []
for summary in summaries:
summary_bytes.append(bytes(summary))
return constants_dict, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes
def bytes_to_vars(constants_dict, summaries_bytes):
summaries = []
for summary in summaries_bytes:
summaries.append(SubEpochSummary.from_bytes(summary))
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
return constants, summaries
def _get_last_ses_hash(
constants: ConsensusConstants, recent_reward_chain: List[HeaderBlock]
) -> Tuple[Optional[bytes32], uint32]:
for idx, block in enumerate(reversed(recent_reward_chain)):
if (block.reward_chain_block.height % constants.SUB_EPOCH_BLOCKS) == 0:
idx = len(recent_reward_chain) - 1 - idx # reverse
# find first block after sub slot end
while idx < len(recent_reward_chain):
curr = recent_reward_chain[idx]
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
return (
slot.challenge_chain.subepoch_summary_hash,
curr.reward_chain_block.height,
)
idx += 1
return None, uint32(0)
def _get_ses_idx(recent_reward_chain: List[HeaderBlock]) -> List[int]:
idxs: List[int] = []
for idx, curr in enumerate(recent_reward_chain):
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
idxs.append(idx)
return idxs
def get_deficit(
constants: ConsensusConstants,
curr_deficit: uint8,
prev_block: BlockRecord,
overflow: bool,
num_finished_sub_slots: int,
) -> uint8:
if prev_block is None:
if curr_deficit >= 1 and not (overflow and curr_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK):
curr_deficit -= 1
return curr_deficit
return calculate_deficit(constants, uint32(prev_block.height + 1), prev_block, overflow, num_finished_sub_slots)
def get_sp_total_iters(constants: ConsensusConstants, is_overflow: bool, ssi: uint64, sub_slot_data: SubSlotData):
assert sub_slot_data.cc_ip_vdf_info is not None
assert sub_slot_data.total_iters is not None
assert sub_slot_data.signage_point_index is not None
sp_iters: uint64 = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
ip_iters: uint64 = sub_slot_data.cc_ip_vdf_info.number_of_iterations
sp_sub_slot_total_iters = uint128(sub_slot_data.total_iters - ip_iters)
if is_overflow:
sp_sub_slot_total_iters = uint128(sp_sub_slot_total_iters - ssi)
return sp_sub_slot_total_iters + sp_iters
def blue_boxed_end_of_slot(sub_slot: EndOfSubSlotBundle):
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None:
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
return True
else:
return True
return False
def validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
tip = weight_proof.recent_chain_data[-1]
weight_to_check = _get_weights_for_sampling(rng, tip.weight, weight_proof.recent_chain_data)
    sampled_sub_epochs: Dict[int, bool] = {}
for idx in range(1, len(sub_epoch_weight_list)):
if _sample_sub_epoch(sub_epoch_weight_list[idx - 1], sub_epoch_weight_list[idx], weight_to_check):
sampled_sub_epochs[idx - 1] = True
if len(sampled_sub_epochs) == WeightProofHandler.MAX_SAMPLES:
break
curr_sub_epoch_n = -1
for sub_epoch_segment in weight_proof.sub_epoch_segments:
if curr_sub_epoch_n < sub_epoch_segment.sub_epoch_n:
if sub_epoch_segment.sub_epoch_n in sampled_sub_epochs:
del sampled_sub_epochs[sub_epoch_segment.sub_epoch_n]
curr_sub_epoch_n = sub_epoch_segment.sub_epoch_n
if len(sampled_sub_epochs) > 0:
return False
return True
def map_segments_by_sub_epoch(sub_epoch_segments) -> Dict[int, List[SubEpochChallengeSegment]]:
segments: Dict[int, List[SubEpochChallengeSegment]] = {}
curr_sub_epoch_n = -1
for idx, segment in enumerate(sub_epoch_segments):
if curr_sub_epoch_n < segment.sub_epoch_n:
curr_sub_epoch_n = segment.sub_epoch_n
segments[curr_sub_epoch_n] = []
segments[curr_sub_epoch_n].append(segment)
return segments
def validate_total_iters(
segment: SubEpochChallengeSegment,
sub_slot_data_idx,
expected_sub_slot_iters: uint64,
finished_sub_slots_since_prev: int,
prev_b: SubSlotData,
prev_sub_slot_data_iters,
genesis,
) -> bool:
sub_slot_data = segment.sub_slots[sub_slot_data_idx]
if genesis:
total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
elif segment.sub_slots[sub_slot_data_idx - 1].is_end_of_slot():
assert prev_b.total_iters
assert prev_b.cc_ip_vdf_info
total_iters = prev_b.total_iters
# Add the rest of the slot of prev_b
total_iters = uint128(total_iters + prev_sub_slot_data_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
# Add other empty slots
total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
else:
# Slot iters is guaranteed to be the same for header_block and prev_b
# This takes the beginning of the slot, and adds ip_iters
assert prev_b.cc_ip_vdf_info
assert prev_b.total_iters
total_iters = uint128(prev_b.total_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
total_iters = uint128(total_iters + sub_slot_data.cc_ip_vdf_info.number_of_iterations)
return total_iters == sub_slot_data.total_iters
|
[
"faurepierre78@yahoo.com"
] |
faurepierre78@yahoo.com
|
81aee84f897fcf6c6ae6f9a8c473d7c9123dda6d
|
e6db96ae32326be9b448f4c3bff158a295036571
|
/tensorFlowStudy/classification_test.py
|
95c54c9b0eba777168868053a5ce2ffcab877578
|
[] |
no_license
|
houweitao/TensorFlow
|
f2e65285f01b52df282bdecc2e01e2e30e3b9b43
|
d8a70592dde70e37f47aae2649b3003b746188f7
|
refs/heads/master
| 2021-01-12T14:59:27.935750
| 2017-01-13T07:48:34
| 2017-01-13T07:48:34
| 71,659,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
# -*- coding: utf-8 -*-
# _author_ = 'hou'
# _project_: classification_test
# _date_ = 16/10/23 下午4:15
# https://www.youtube.com/watch?v=aNjdw9w_Qyc&index=17&list=PLXO45tsB95cKI5AIlf5TxxFPzb-0zeVZ8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import add_layer as layer
# data http://yann.lecun.com/exdb/mnist/
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# define placeholder
xs = tf.placeholder(tf.float32, [None, 784])  # 28*28 pixels per image
ys = tf.placeholder(tf.float32, [None, 10])  # 10 output classes
# add output layer
predication = layer.add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
# another
# b = tf.Variable(tf.zeros([10]))
# W = tf.Variable(tf.zeros([784,10]))
# predication= tf.nn.softmax(tf.matmul(xs,W) + b);
# loss: the leading minus sign keeps the cross-entropy positive,
# since ys * tf.log(predication) is always <= 0
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(predication), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
def compute_accuracy(v_xs, v_ys):
global predication
y_pre = sess.run(predication, feed_dict={xs: v_xs})
correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
return result
for step in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)  # take a batch of 100 samples each step
sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
if step % 50 == 0:
print compute_accuracy(mnist.test.images, mnist.test.labels)
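# With this single softmax layer, the printed accuracy typically climbs to
# roughly 0.85-0.92 over the 1000 batches (an expectation, not a guaranteed figure).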
|
[
"hou103880@163.com"
] |
hou103880@163.com
|
b0a496a2adad7d4299f3c94ceb3f5651a373a629
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/manage.py
|
436b9b54ecb5b87023cfad764e11bb94a803445a
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220
| 2018-08-11T23:45:31
| 2018-08-11T23:45:31
| 117,135,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('manage', __name__, url_prefix='/manage')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
393fe84cca759b9236a600d0c71f10ffda0a904a
|
fc78f7cfa4e63e5d6df787d4bd5ba58d50ac2895
|
/manage.py
|
81154eb46b1ff68007a6fa716702021e1dc4c026
|
[] |
no_license
|
Erecik1/boostingweb
|
d608c109d61d4e2f0badd9af5477e10f4b780291
|
e0032c039b1a527dccc76a4b1cf5e04355001aa3
|
refs/heads/master
| 2023-08-01T03:43:19.992022
| 2021-09-22T22:03:19
| 2021-09-22T22:03:19
| 405,763,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tipsy.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"eret112@gmail.com"
] |
eret112@gmail.com
|
6dd47cf9abf6588f76b33b1300c80b06fe34f86b
|
304e75224229786ba64c6ef2124007c305019b23
|
/src/easy/test_build_array_from_permutation.py
|
8fd8efbd03f279c3c5d2f1ed987d934e5687eadc
|
[] |
no_license
|
Takuma-Ikeda/other-LeetCode
|
9179a8100e07d56138fd3f3f626951195e285da2
|
499616d07011bee730b9967e9861e341e62d606d
|
refs/heads/master
| 2023-04-14T06:09:35.341039
| 2023-04-10T02:29:18
| 2023-04-10T02:29:18
| 226,260,312
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
import unittest
from answer.build_array_from_permutation import Solution
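# LeetCode 1920 "Build Array from Permutation": the expected behaviour is
# ans[i] = nums[nums[i]] for every index i, which both fixtures below satisfy.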
class TestSolution(unittest.TestCase):
def setUp(self):
self.nums = [
[0, 2, 1, 5, 3, 4],
[5, 0, 1, 2, 3, 4],
]
self.answers = [
[0, 1, 2, 4, 5, 3],
[4, 5, 0, 1, 2, 3],
]
def solution(self, i):
s = Solution()
result = s.buildArray(self.nums[i])
self.assertEqual(self.answers[i], result)
def test_solution0(self):
self.solution(0)
def test_solution1(self):
self.solution(1)
if __name__ == "__main__":
unittest.main()
|
[
"el.programdear@gmail.com"
] |
el.programdear@gmail.com
|
652e8c01463ca031788666db93024bbc761ec75d
|
14856ffe01c711af7a41af0b1abf0378ba4ffde6
|
/Python/Fundamentals/Fun_with_Functions.py
|
4db600213841d74d4382c1514cc6f369abdc29a8
|
[] |
no_license
|
sharonanchel/coding-dojo
|
9a8db24eec17b0ae0c220592e6864510297371c3
|
d6c4a7efd0804353b27a49e16255984c4f4b7f2a
|
refs/heads/master
| 2021-05-05T18:17:48.101853
| 2017-06-23T23:53:51
| 2017-06-23T23:53:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Odd/Even
def odd_even():
    for i in range(1,2001):
        if i % 2 == 0:
            kind = 'even'  # renamed from 'type' to avoid shadowing the builtin
        else:
            kind = 'odd'
        print 'Number is', i, 'This is an', kind, 'number.'
odd_even()
# Multiply
def multiply(arr, num):
for i in range(0,len(arr)):
arr[i] *= num
return arr
print multiply([2,4,10,16],5)
# Hacker Challenge
def layered_multiples(arr):
newerArray = []
for i in arr:
newArray = []
for x in range(0,i):
newArray.append(1)
newerArray.append(newArray)
return newerArray
print layered_multiples(multiply([2,4,5],3))
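# Expected output of the last call: multiply([2,4,5],3) returns [6,12,15], so
# layered_multiples yields three lists of ones with lengths 6, 12 and 15.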
|
[
"jao.colin@gmail.com"
] |
jao.colin@gmail.com
|
b32d0c2672ca5d2afe58d2b5c3af3ad37e89ffba
|
f23c9196b21e4ff189d2c8399f4a77de2813d2b2
|
/tests/Python + Tornado/simpleserver.py
|
1f5bb8f4f2c28b1d9f901d93d82e36fa91b26a74
|
[] |
no_license
|
gchatelet/web-performance-tests
|
bcf903135bfcdc46a485c5a0acb9175d125ab3a2
|
3e0b526132abf840dfbc9dd235a94dd4713f9c9b
|
refs/heads/master
| 2020-04-14T23:54:02.606248
| 2013-03-18T11:10:18
| 2013-03-18T11:10:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
import tornado.ioloop
import tornado.web
class GreetingResourceHandler(tornado.web.RequestHandler):
def get(self):
        self.write("Hello, " + self.get_argument('name', 'World'))  # default must be a str; True would break the concatenation
application = tornado.web.Application([
(r"/", GreetingResourceHandler),
])
if __name__ == "__main__":
application.listen(8080)
tornado.ioloop.IOLoop.instance().start()
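# Example request against a local run: GET http://localhost:8080/?name=Ada
# responds with "Hello, Ada".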
|
[
"chatelet.guillaume@gmail.com"
] |
chatelet.guillaume@gmail.com
|
4dd3029190c967aa1021f127e1f73e6489ab3fb7
|
b012b7ce6a371511de44eee63e8bcceb29ae4a23
|
/manage.py
|
2aee2cf4a1d44bbe7e9e6f5984a50da229840468
|
[] |
no_license
|
Srinivasu-Gillella/djtemplates1
|
f23974b7af13f64717eeb9d8547cc38e046e7383
|
f77b700e6a444362c2c4c68b7836bf6f417a1a96
|
refs/heads/master
| 2022-12-16T04:12:19.059026
| 2020-09-28T12:10:16
| 2020-09-28T12:10:16
| 299,296,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djtemplates1.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"64829989+Srinivasu-Gillella@users.noreply.github.com"
] |
64829989+Srinivasu-Gillella@users.noreply.github.com
|
79c1cfdd225efbe367313be720d75fd7941a44b2
|
4eebce0d0c1132aed8227325bd58faf61a4010c7
|
/CONTEXT_178/d2.py
|
7a83e4ac92f0948ab14911f4a674624665be9101
|
[] |
no_license
|
xu1718191411/AT_CODE_BEGINNER_SELECTION
|
05836cfcc63dab2a0a82166c8f4c43c82b72686b
|
e4e412733d1a632ce6c33c739064fe036367899e
|
refs/heads/master
| 2021-07-17T00:59:46.315645
| 2020-09-22T06:14:27
| 2020-09-22T06:14:27
| 214,153,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
def calculate(n):
arr = [0 for i in range(2000 + 1)]
arr[3] = 1
arr[4] = 1
arr[5] = 1
for i in range(6, n + 1):
arr[i] = (arr[i - 2] + arr[i - 3] + arr[i - 4]) % (1000000000 + 7)
print(arr[n])
calculate(int(input()))
|
[
"xu1718191411@gmail.com"
] |
xu1718191411@gmail.com
|
e5f68e03c3e1f9546ede6129f969da71ae852f20
|
2369cb94be22a6dcaf5694faf7b40a440d24c3ef
|
/rally_plugins/scenarios/kubernetes/replication_controllers.py
|
bc305f074bf4bebeb74cfd3c4dc2a36732466f0b
|
[
"Apache-2.0"
] |
permissive
|
xinhuihu/rally-plugins
|
c1c5c9a595c4fbe23e81923da224a7ddd06c15dc
|
a26fe046862b4fcf695dd237586134f81953d707
|
refs/heads/master
| 2020-08-09T13:38:53.745630
| 2019-08-02T10:33:57
| 2019-08-02T10:33:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,445
|
py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import scenario
from rally_plugins.scenarios.kubernetes import common as common_scenario
@scenario.configure(name="Kubernetes.create_and_delete_replication_controller",
platform="kubernetes")
class RCCreateAndDelete(common_scenario.BaseKubernetesScenario):
"""Kubernetes replication controller create and delete test.
Choose created namespace, create replication controller with defined image
and number of replicas, wait until it won't be running and delete it after.
"""
def run(self, image, replicas, command=None, status_wait=True):
"""Create and delete replication controller.
        :param image: replication controller image
        :param replicas: number of replicas for replication controller
:param command: array of strings representing container command
:param status_wait: wait replication controller status
"""
namespace = self.choose_namespace()
name = self.client.create_rc(
replicas=replicas,
image=image,
namespace=namespace,
command=command,
status_wait=status_wait
)
self.client.delete_rc(
name,
namespace=namespace,
status_wait=status_wait
)
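# A sketch of a rally task entry that could drive this scenario (the image and
# runner values below are illustrative assumptions, not taken from this repo):
#
# {"Kubernetes.create_and_delete_replication_controller": [{
#     "args": {"image": "kubernetes/pause", "replicas": 2},
#     "runner": {"type": "constant", "times": 10, "concurrency": 2}}]}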
@scenario.configure(
name="Kubernetes.create_scale_and_delete_replication_controller",
platform="kubernetes"
)
class CreateScaleAndDeleteRCPlugin(common_scenario.BaseKubernetesScenario):
"""Kubernetes replication controller scale test.
Create replication controller, scale it with number of replicas,
scale it with original number of replicas, delete replication controller.
"""
def run(self, image, replicas, scale_replicas, command=None,
status_wait=True):
"""Create RC, scale with replicas, revert scale and then delete it.
:param image: RC pod template image
:param replicas: original number of replicas
:param scale_replicas: number of replicas to scale
:param command: array of strings representing container command
:param status_wait: wait replication controller status
"""
namespace = self.choose_namespace()
name = self.client.create_rc(
namespace=namespace,
replicas=replicas,
image=image,
command=command,
status_wait=status_wait
)
self.client.scale_rc(
name,
namespace=namespace,
replicas=scale_replicas,
status_wait=status_wait
)
self.client.scale_rc(
name,
namespace=namespace,
replicas=replicas,
status_wait=status_wait
)
self.client.delete_rc(
name,
namespace=namespace,
status_wait=status_wait
)
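# Illustrative args for the scale scenario (the values are assumptions):
# {"image": "kubernetes/pause", "replicas": 1, "scale_replicas": 3}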
|
[
"prazumovsky@mirantis.com"
] |
prazumovsky@mirantis.com
|
5f6535ea4fadf155e1c96cc0508e31a1a8227986
|
9c0bb2bd2788bac007f857eca11a672751c8d808
|
/hello_world.py
|
a2a497ef8c45ee1717f3cbe8af39a268a1c98ac3
|
[] |
no_license
|
michaelorr/10gen
|
e52b5ff697fa845ab523e1268b38502b4bb61c61
|
e938d92a291c1986deb51de84043efff446bc170
|
refs/heads/master
| 2016-09-06T16:00:28.218176
| 2013-02-24T18:48:11
| 2013-02-24T18:48:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from bottle import route, run, template
@route('/hello/:name')
def index(name='World'):
return template('<b>Hello {{name}}</b>', name=name)
run(host='localhost', port=8888)
|
[
"michael@orr.co"
] |
michael@orr.co
|
4d3d7789762162ec46b85085c7c5971a1c7ab7cc
|
aab2f73ce75434ae127a00c708ec14e29d99dfec
|
/fermi_level/carrier_con_instrinsic_semi.py
|
7f2d08137e2169990632993d6e2db1a9438df8a2
|
[] |
no_license
|
dadhikar/semiconductor_physics_and_modeling
|
29a7c008d93906d40d82a6ef4657da7c28830a6a
|
8e9c082daaf41b228641bc6741e04a491eb7a06f
|
refs/heads/master
| 2022-11-29T04:26:00.530412
| 2020-08-18T21:08:18
| 2020-08-18T21:08:18
| 256,613,677
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,986
|
py
|
"""
For an intrinsic semiconductor, calculating electron density
at conduction band.
This involves solving Fermi-Dirac integral
"""
# importing required libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
from scipy.integrate import quad
def fermi_dirac_dist(e, ef, T):
"""
define Fermi-Dirac distribution function
where, e - energy level in eV
ef - fermi energy level in eV
T - temperature
return:
probability value for the distribution
"""
    kb = 8.617e-5  # Boltzmann constant in eV/K
if T == 0.0:
if e <= ef:
f2 = 1.0
else:
f2 = 0.0
else:
f1 = np.exp((e-ef)/(kb*T))
f2 = 1/(1+f1)
return f2
# T = np.linspace(2, 100, 20, endpoint=True)
# e = np.linspace(0.2, 0.4, 50, endpoint=True)
# print(T)
# f = fermi_dirac_dist(e, 0.3, 300)
# plt.plot(e, f)
# plt.show()
def density_of_states(e, ec):
"""
Density of states near the bottom of the conduction band
for low-enough carrier density and temperature
ec (in eV) - conduction band edge
e (in eV)- energy value close to ec
"""
me = 0.5110e6 # electron mass (in eV)
factor = 0.91 # this will be material dependent
meff = factor*me # effective electron mass
h_cross = 6.582e-16 # in eV-s
f1 = (np.sqrt(2)/np.power(np.pi, 2))*np.power(meff, 1.5)
f2 = np.power(e-ec, 0.5)/np.power(h_cross, 3)
return f1*f2
# print(density_of_states(0.302, 0.3))
def fermi_dirac_integrand(x, xf):
"""
x = (e-ec)/kB*T
xf = (ef-ec)/kB*T
ef = Fermi enegry in eV
ec = conduction band edge in eV
kB = Boltzmann constant
T = Temperature
"""
return np.power(x, 0.5)/(1+np.exp(x-xf))
def fermi_dirac_integral(xf):
    """Numerically evaluate the Fermi-Dirac integral of order 1/2 at xf.
    Note that the upper limit is truncated at x = 10, which is adequate
    only as long as the integrand has decayed by then.
    """
    integral_value, _ = quad(func=fermi_dirac_integrand, a=0, b=10, args=(xf,),
                             full_output=0, epsabs=1.49e-08, epsrel=1.49e-08, limit=50,
                             points=None, weight=None, wvar=None, wopts=None, maxp1=50, limlst=50)
return integral_value
fermi_integral = []
xf = np.linspace(-10, 10, 1000)
for x in xf:
integral_value = fermi_dirac_integral(x)
# print(xf, integral_value)
fermi_integral.append(integral_value)
plt.semilogy(xf, np.asarray(fermi_integral), 'ro', ms=2.5, label=r'Fermi-Dirac')
plt.semilogy(xf, 0.5*np.power(np.pi, 0.5)*np.exp(xf), 'ko', ms=2.5, label=r'Boltzmann approx.' )
plt.vlines(x=0.0, ymin= 1e-5, ymax= 30, colors='g', linestyles='--',linewidth=2.0)
plt.xlabel(r'(E$_{f}$ - E$_{c}$) / k$_{B}$T [no unit]')
plt.ylabel('Fermi-Dirac Integral [ab. unit]')
plt.xlim(-10, 10)
plt.ylim(1e-5, 25)
plt.legend()
#plt.text(0.55, 0.5, r'E$_{f}$ = 0.5 eV', c='r', fontsize=12)
plt.title(r'Intrinsic Semiconductor')
plt.show()
|
[
"dadhikar@dadhikar.local"
] |
dadhikar@dadhikar.local
|
a959feaae80f94e9b538502bda97297c7f29dc52
|
0d7085e202a232b419e4a2d0efca07ec30b474e6
|
/src/storage.py
|
6070abacfb99463a396ca4d48626e45a122dec50
|
[] |
no_license
|
maconel/fastrun
|
8a0880cb4078d93da2f8ae24ab52044efb34e78b
|
131701aa6f95b9692965461d395d8a7d0e5f6c13
|
refs/heads/master
| 2021-01-01T19:39:28.499482
| 2011-10-30T13:37:06
| 2011-10-30T13:37:06
| 2,520,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
import pinyinlib
datafilepath = r'..\data\data.txt'
# file format: name |path |cmd0 :cmd1 :cmd2 |priority
# e.g. 记事本 (Chinese for "Notepad")|c:\windows\notepad.exe|记事本:notepad.ex:jishibene|18
class Item(object):
def __init__(self, name, path, cmd, priority):
self.name = name
self.path = path
self.cmd = cmd
self.priority = priority
class Storage(object):
def __init__(self):
self.items = []
def load(self):
self.items = []
with file(os.path.join(curfilepath(), datafilepath), 'rt') as f:
lineno = 0
for line in f:
fields = line.rstrip('\r\n').split('|')
if len(fields) != 4:
continue
self.items.append(Item(fields[0], fields[1], fields[2].lower().split(':'), int(fields[3])))
lineno += 1
def raise_priority(self, item):
item.priority += 1
self.items.sort(key=lambda(item):item.priority, reverse=True)
with file(os.path.join(curfilepath(), datafilepath), 'wt') as f:
for item in self.items:
f.write(self.item_to_str(item))
f.write('\n')
def item_to_str(self, item):
return '|'.join((item.name, item.path, ':'.join(item.cmd), '%04d' % item.priority))
def add(self, name, path):
pinyinlist = pinyinlib.wordlist_to_pinyin(name)
item = Item(name, path, ':'.join((name, os.path.basename(path), ':'.join(pinyinlist))), 0)
self.items.append(item)
with file(os.path.join(curfilepath(), datafilepath), 'at') as f:
f.write(self.item_to_str(item))
f.write('\n')
def curfilepath():
return os.path.dirname(os.path.abspath(os.path.join(os.getcwd(), __file__)))
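# Sketch of typical usage (the name and path are illustrative):
#     storage = Storage()
#     storage.load()
#     storage.add(u'记事本', r'c:\windows\notepad.exe')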
|
[
"maconel.reg@gmail.com"
] |
maconel.reg@gmail.com
|
cf384ca62ab9185a7636264fbac38c353fd2bb0c
|
a7c6cf9663f1db1ed407f70e3ee2578ee38b0eca
|
/main.py
|
e452961320167ed3fba64a2ba5ca1561ae0dfe9e
|
[
"MIT"
] |
permissive
|
theSlayer4089/EasyFNBotGlitch
|
bfce879c92c56a239b698b4802c45a04e1452053
|
fd214dc3984092b3a845b7ab6960ba389893dc50
|
refs/heads/master
| 2020-12-05T22:36:01.536330
| 2020-01-05T16:45:38
| 2020-01-05T16:45:38
| 232,265,289
| 1
| 0
|
MIT
| 2020-01-07T07:12:19
| 2020-01-07T07:12:19
| null |
UTF-8
|
Python
| false
| false
| 8,429
|
py
|
import json,fortniteAPI,functions,MultipleClients,os,UpdateCheck
from functools import partial
from Events import ready,friends,party,message
from threading import Thread
try:
import fortnitepy,asyncio,aiofiles
from termcolor import colored
from flask import Flask
except:
os.system("pip3 install --user -r requirements.txt")
Settings = json.loads(open("Settings.json").read())
Languages = ["ar","de","es-419","es","en","fr","it","ja","ko","pl","pt-BR","ru","tr","zh-CN","zh-Hant"]
fortniteClient = fortnitepy.Client(email=Settings["Email"],password=Settings["Password"],status="Join my Discord\nIf you want your own bot\nhttps://discord.gg/jxgZH6Z\nOr Follow me on Twitter\n@LupusLeaks")
fortniteClient.Settings = Settings
fortniteClient.Clients = {}
fortniteClient.RemovingFriends = False
default_party_member = []
default_party = {}
#Default language
if Settings["Default item search language"] in Languages:
fortniteClient.DefaultLang = Settings["Default item search language"].lower()
else:
    print(f'ERROR: Couldn\'t find {Settings["Default item search language"]} as a language')
fortniteClient.DefaultLang = "en"
#Banner
SeasonLevel = 1000
if Settings["Default season level"] and type(Settings["Default season level"]) == int:
    SeasonLevel = Settings["Default season level"]
else:
    print(f'ERROR: {Settings["Default season level"]} is invalid, make sure you only use numbers')
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_banner,season_level=SeasonLevel,icon=Settings["Default banner"],color=Settings["Default banner color"]))
#Platform + Privacy
if Settings["Platform"].upper() in fortnitepy.Platform.__members__:
fortniteClient.platform = fortnitepy.Platform[Settings["Platform"].upper()]
if Settings["Privacy"].upper() in fortnitepy.PartyPrivacy.__members__:
default_party["privacy"] = fortnitepy.PartyPrivacy[Settings["Privacy"].upper()]
#Cosmetics
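# NOTE: the misspelled "varaint" keys below are kept as-is on purpose; they
# must match the spelling used in Settings.json.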
#Backpack
if Settings["Default backpack"] and not Settings["Default pet"]:
Backpack = fortniteAPI.SGetBackpack(Settings["Default backpack"],fortniteClient.DefaultLang)
if not "status" in Backpack:
v = []
if Settings["Default backpack varaint channel name"] and Settings["Default backpack varaint name"] and Backpack["variants"]["en"]:
VariantChannelName = Settings["Default backpack varaint channel name"].upper()
Variant = Settings["Default backpack varaint name"].upper()
for variant in Backpack["variants"]["en"]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaBackpack"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_backpack,asset=f'{str(Backpack["path"]).replace("FortniteGame/Content","/Game")}.{Backpack["id"]}',variants=v))
#Skin
if Settings["Default skin"]:
Skin = fortniteAPI.SGetSkin(Settings["Default skin"],fortniteClient.DefaultLang)
if not "status" in Skin:
v = []
if Settings["Default skin varaint channel name"] and Settings["Default skin varaint name"] and Skin["variants"]["en"]:
VariantChannelName = Settings["Default skin varaint channel name"].upper()
Variant = Settings["Default skin varaint name"].upper()
for variant in Skin["variants"]["en"]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaCharacter"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_outfit,asset=f'{str(Skin["path"]).replace("FortniteGame/Content","/Game")}.{Skin["id"]}',variants=v))
#Pickaxe
if Settings["Default pickaxe"]:
Pickaxe = fortniteAPI.SGetPickaxe(Settings["Default pickaxe"],fortniteClient.DefaultLang)
if not "status" in Pickaxe:
v = []
if Settings["Default pickaxe varaint channel name"] and Settings["Default pickaxe varaint name"] and Pickaxe["variants"]["en"]:
VariantChannelName = Settings["Default pickaxe varaint channel name"].upper()
Variant = Settings["Default pickaxe varaint name"].upper()
for variant in Pickaxe["variants"]["en"]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaPickaxe"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_pickaxe,asset=f'{str(Pickaxe["path"]).replace("FortniteGame/Content","/Game")}.{Pickaxe["id"]}',variants=v))
#Pet
if Settings["Default pet"]:
Pet = fortniteAPI.SGetPet(Settings["Default pet"],fortniteClient.DefaultLang)
if not "status" in Pet:
v = []
if Settings["Default pet varaint channel name"] and Settings["Default pet varaint name"] and Pet["variants"]["en"]:
VariantChannelName = Settings["Default pet varaint channel name"].upper()
Variant = Settings["Default pet varaint name"].upper()
            for variant in Pet["variants"]["en"]:  # was Pickaxe: a copy-paste bug
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(functions.create_variant(variant["channel"],tag["tag"],item="AthenaPetCarrier"))
default_party_member.append(partial(fortnitepy.ClientPartyMember.set_backpack,asset=f'{str(Pet["path"]).replace("FortniteGame/Content","/Game")}.{Pet["id"]}',variants=v))
fortniteClient.default_party_config = default_party
fortniteClient.default_party_member_config = default_party_member
@fortniteClient.event
async def event_ready():
fortniteClient.starting = True
fortniteClient.mainID = fortniteClient.user.id
tasks = []
for email,password in Settings["SubAccounts"].items():
if "@" in email:
tasks.append(MultipleClients.LoadAccount(fortniteClient,email,password))
if len(tasks) > 0:
print("Starting sub accounts!")
await asyncio.wait(tasks)
for Client in fortniteClient.Clients.values():
Friends = fortniteClient.has_friend(Client.user.id)
if not Friends:
try:
await fortniteClient.add_friend(Client.user.id)
except:
pass
Client.starting = False
await ready.Ready(fortniteClient)
fortniteClient.starting = False
@fortniteClient.event
async def event_friend_add(friend):
await friends.event_friend_add(fortniteClient, friend)
@fortniteClient.event
async def event_friend_remove(friend):
await friends.event_friend_remove(fortniteClient, friend)
@fortniteClient.event
async def event_friend_request(friend):
await friends.event_friend_request(fortniteClient, friend)
@fortniteClient.event
async def event_party_invite(invitation):
await party.event_party_invite(fortniteClient, invitation)
@fortniteClient.event
async def event_party_member_join(Member):
await party.event_party_member_join(fortniteClient,Member)
@fortniteClient.event
async def event_party_member_promote(old_leader, new_leader):
await party.event_party_member_promote(fortniteClient, old_leader,new_leader)
@fortniteClient.event
async def event_party_message(Message):
await message.Command(fortniteClient, Message)
@fortniteClient.event
async def event_friend_message(Message):
await message.Command(fortniteClient, Message)
app = Flask(__name__)
@app.route('/')
def Home():
return "Follow @LupusLeaks on Twitter"
Thread(target=app.run).start()
Thread(target=UpdateCheck.CheckVersion).start()
Thread(target=UpdateCheck.CheckItems).start()
try:
fortniteClient.run()
except fortnitepy.errors.AuthException:
print(colored("Invalid account credentials!","red"))
|
[
"noreply@github.com"
] |
noreply@github.com
|
ab8a37c6cec2cd67dee3e609825cc30311aeeacd
|
6d8ed4233a766ed34f3e3924fcba241e11341cbc
|
/TestBase64.py
|
bccaf7d0f0bab3149626f3d0982c6fd43a30a27b
|
[] |
no_license
|
kuxingseng/learnPython
|
56b77b01ddfbc3c8483d8abec2bd1eea186b6f19
|
73a746a9329f0ba3bfabb7f5e47864364ed44391
|
refs/heads/master
| 2021-09-12T11:07:33.951552
| 2018-04-08T02:42:05
| 2018-04-08T02:42:05
| 113,518,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# Base64 is a way to represent arbitrary binary data using 64 printable characters.
# Every 6 bits are converted into one character.
import base64
b64_encode = base64.b64encode(b'test')
print(b64_encode)
b64_decode = base64.b64decode(b64_encode)
print(b64_decode)
def safe_base64_decode(s):
    # number * bytes repeats the bytes that many times;
    # (-len(s)) % 4 is the number of '=' pads needed to reach a multiple of 4
    s = s + (-len(s) % 4) * b'='
return base64.b64decode(s)
print(safe_base64_decode(b'dGVzdA'))
print(5 * 't')
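# Worked example: b'dGVzdA' has length 6 and (-6) % 4 == 2, so two '=' pads are
# appended to form b'dGVzdA==' before decoding back to b'test'.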
|
[
"chshuai@hotmail.com"
] |
chshuai@hotmail.com
|
2c635e67dd79f81bcb26276f01d417fc9ebf8127
|
8baf2a9f9b11117e979b6629b8bbb8d7f3395f9a
|
/iohelp.py
|
eb5e6a94c52c9731850f20db1987bd588fbedb9d
|
[
"MIT"
] |
permissive
|
holzfigure/hiveopolis_broodframe_background
|
b28de8b231e0299c886d8776e4e3ba18040dcc37
|
dfe89300c00b0d459f71132b464092ec5d1ce656
|
refs/heads/master
| 2022-03-14T06:57:51.939327
| 2019-11-27T06:19:26
| 2019-11-27T06:19:26
| 219,782,785
| 0
| 0
|
MIT
| 2019-11-05T16:31:46
| 2019-11-05T15:47:36
| null |
UTF-8
|
Python
| false
| false
| 22,641
|
py
|
#!/usr/bin/env python3
"""A library of helpful functions.
Notably to set up output-folders safely, with time-stamped copies
of the source code included.
holzfigure 2019
"""
# import os
# import csv
import time
import math
import shutil
# import argparse
import logging
import logging.handlers
from pathlib import Path
from datetime import datetime # , timedelta
import tkinter
from tkinter import Tk, filedialog
# import numpy as np
import matplotlib
from matplotlib import pyplot as plt
# NOTE: module "imp" is deprecated..
import warnings
warnings.filterwarnings('error', category=DeprecationWarning)
# # GLOBALS
POSTFIX_DIR = "out"
TIME_FMT = "%y%m%d-%H%M%S-utc"
DAY_FMT = "%y%m%d-utc"
# Logging
LOG_MAX_BYTES = 20000000 # ~ 20 MB
LOG_BACKUP_COUNT = 50
# Plotting
DEF_EXT = "png"
DEF_WF = 3.0
COLORMAP = plt.cm.viridis
# colormap = plt.cm.viridis
# colormap = plt.cm.jet
# colormap = plt.cm.gist_ncar
# colormap = plt.cm.Set1
def now_str(pattern=TIME_FMT):
"""Return a formatted timestring for the current time."""
# return time.strftime(pattern, time.gmtime())
return datetime.utcnow().strftime(pattern)
def parse_subtree(filedir, pattern):
"""Parse a subtree (including subfolders) for the pattern.
from:
https://stackoverflow.com/questions/2186525/
use-a-glob-to-find-files-recursively-in-python
+ sorting
[requires 'import fnmatch']
Deprecated since using pathlib! (v180817)
"""
# matches = []
# for root, dirnames, filenames in os.walk(filedir):
# for filename in fnmatch.filter(filenames, pattern):
# matches.append(os.path.join(root, filename))
# return sorted(matches)
filedir = Path(filedir).resolve()
return sorted(filedir.rglob(pattern))
def safename(s, s_type="file"):
"""Append stuff to a file or folder if it already exists.
Check whether a given file or folder 's' exists, return a non-existing
filename.
s ........ (full) filename or directory
s_type ... 'file' or 'f' for files,
'directory' or 'dir' or 'd' for folders
Returns a file- or pathname that is supposedly safe to save
without overwriting data.
"""
# Ensure s is a Path object
p = Path(s)
low_type = str.lower(s_type)
if low_type == "file" or low_type == "f":
        # if os.path.isfile(s):
        if p.is_file():
stem = p.stem
suffix = p.suffix
counter = 0
while p.is_file():
# p = p.with_name(f"{stem}-{counter:02d}{suffix}")
p = p.with_name("{}-{:02d}{}".format(stem, counter, suffix))
counter += 1
elif low_type == "directory" or low_type == "dir" or low_type == "d":
if p.is_dir():
stem = p.stem
counter = 0
while p.is_dir():
# s = s_base + "-{:02d}".format(counter)
# p = p.with_name(f"{stem}-{counter:02d}")
p = p.with_name("{}-{:02d}".format(stem, counter))
counter += 1
return p
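# Example: if out/plot.png already exists, safename(Path("out/plot.png"), "file")
# returns the first of out/plot-00.png, out/plot-01.png, ... that does not exist.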
def safesavefig(path, ext=".png", close=True, verbose=False):
"""Safely save a figure from pyplot.
adapted from:
http://www.jesshamrick.com/2012/09/03/saving-figures-from-pyplot/
# plt.gcf().canvas.get_supported_filetypes()
# plt.gcf().canvas.get_supported_filetypes_grouped()
filetypes = {
'ps': 'Postscript',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
}
180817 Added a '.' to the default extension to be compatible
with path.suffix
"""
valid_extensions = plt.gcf().canvas.get_supported_filetypes()
fallback_ext = ".png"
# Ensure path is a pathlib.Path object
path = Path(path)
# Parse path components
directory = path.parent
stem = path.stem
suffix = path.suffix
# Check whether path already has an extension
if suffix:
if suffix in valid_extensions:
if suffix != ext:
logging.debug(f"Overwriting kwarg ext '{ext}' " +
f"with suffix '{suffix}' from {path}!")
ext = suffix
else:
logging.debug(f"Overwriting file suffix '{suffix}' "
f"with kwarg ext '{ext}'!")
# Ensure extension is correct
ext = ext.lower()
if not ext.startswith("."):
logging.debug(f"Adding '.' to {ext}")
ext = f".{ext}"
if ext.split(".")[-1] not in valid_extensions:
logging.warning(f"Invalid extension '{ext}', " +
f"replacing with '{fallback_ext}'")
ext = fallback_ext
# Generate filename
# filename = "%s.%s" % (os.path.split(path)[1], ext)
filename = stem + ext
# Ensure valid directory
if not directory:
directory = Path.cwd()
directory = directory.resolve()
if not directory.is_dir():
directory.mkdir(parents=True)
# Finalize full filename
# savepath = os.path.join(directory, filename)
savepath = directory / filename
savepath = safename(savepath, 'file')
# Save figure to file
# TODO: Remove str() once matplotlib is updated??
plt.savefig(str(savepath))
if verbose:
logging.info(f"Saved figure to {savepath}")
if close:
plt.close()
# if verbose:
# logging.debug("Done")
return savepath
def latexify(fig_width=None, fig_height=None, columns=1):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
Code adapted from
http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
Width and max height in inches for IEEE journals taken from
https://www.computer.org/cms/Computer.org/Journal%20templates/
transactions_art_guide.pdf
from https://nipunbatra.github.io/blog/2014/latexify.html
(v180817: updated this link)
"""
assert(columns in [1, 2])
if fig_width is None:
fig_width = 3.39 if columns == 1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (math.sqrt(5) - 1.0) / 2.0 # aesthetic ratio
fig_height = fig_width * golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {'backend': 'ps',
'text.latex.preamble': ['\\usepackage{gensymb}'],
'axes.labelsize': 6, # fontsize for x and y labels (was 10)
'axes.titlesize': 6,
'font.size': 6, # 'text.fontsize': 8, # was 10
'legend.fontsize': 6, # was 10
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'text.usetex': True,
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
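# Typical usage (a sketch; the figure content is illustrative):
#     latexify(columns=1)
#     fig, ax = plt.subplots()
#     ax.plot(range(10))
#     format_axes(ax)
#     safesavefig(Path("figure.pdf"))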
def format_axes(ax):
"""Format axes."""
spine_color = 'gray'
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(spine_color)
ax.spines[spine].set_linewidth(0.5)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=spine_color)
return ax
# # The following functions require numpy:
# def euclid(p1, p2):
# return np.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
#
# def euclid_array(p1s, p2s):
# """The inputs "p1s" and "p2s" are 2-column arrays
# of XY-coordinates.
# """
# return np.sqrt((p2s[:, 0] - p1s[:, 0])**2 +
# (p2s[:, 1] - p1s[:, 1])**2)
def setup_environment(
thisfile,
dir_targ=None,
level=1,
new_dir=True,
prefix_file=None,
postfix_dir=None,
daystamp=False, day_fmt=DAY_FMT,
dependencies=None,
):
"""Create an output directory safely.
No overwriting of existing files and folders.
in (optional):
dir_targ .... str full path to a directory. def=""
level .... int (-1, [0], 1) are the choices.
[-1: dir-out is sibling to the given directory
0: dir_out is the given directory
CAUTION: will only be this directory if
new_dir=False and postfix=None!
DEFAULT: 1: dir_out is child of the given directory]
new_dir .... bool if True, create a new directory, even if
one already exists
if False, write into an existing
directory with the given name
thisfile .... bool if True, get full path to current file
[if i call "os.path.basename(__file__)" here,
will i get the path to the calling code,
or to this file 'holzhelp.py'?]
prefix_file .... str prefix file with this (and '_')
postfix_dir .... str append to name of output-folder.
dependencies ... list paths to other files to copy to dir_out
out:
dir_out .... str full path to created output directory
"""
# Set up directories
# ==================
# # if interactive:
# dir_targ = filedialog.askdirectory(initialdir=DIR_INI)
# thisfile = os.path.basename(__file__)
# thisname = os.path.splitext(os.path.split(thisfile)[1])[0]
thisfile = Path(thisfile).resolve()
thisname = thisfile.stem
if prefix_file:
# thisname = f"{prefix_file}_{thisname}"
thisname = "{}_{}".format(prefix_file, thisname)
if not dir_targ:
# dir_targ = os.path.join(os.getcwd(), postfix)
# dir_targ = os.getcwd()
# dir_targ = Path.cwd() / f"{thisname}_{postfix_dir}"
dir_targ = Path.cwd()
else:
dir_targ = Path(dir_targ)
# determine level to place directory
if level < 0:
# basedir, lastdir = os.path.split(dir_targ)
# os.path.join(basedir, thisname)
# dir_out = dir_targ.with_name(f"{dir_targ.stem}_{thisname}")
dir_out = dir_targ.with_name("{}_{}".format(
dir_targ.stem, thisname))
elif level == 0:
# NOTE: only stays if new_dir=False and postfix=None!
dir_out = dir_targ
elif level > 0:
# dir_out = os.path.join(dir_targ, thisname)
dir_out = dir_targ / thisname
if postfix_dir:
# dir_out += "_" + postfix_dir
# dir_out = dir_out.with_name(f"{dir_out.stem}_{postfix_dir}")
dir_out = dir_out.with_name("{}_{}".format(
dir_out.stem, postfix_dir))
if daystamp:
# dir_out += now_str("_%y%m%d-utc")
# dir_out = dir_out.with_name(f"{dir_out.stem}_{now_str(day_fmt)}")
dir_out = dir_out.with_name("{}_{}".format(
dir_out.stem, now_str(day_fmt)))
if new_dir:
dir_out = safename(dir_out, 'directory')
if not dir_out.is_dir():
# os.mkdir(dir_out)
dir_out.mkdir(parents=True)
# logging.info("created output directory at '{}'".format(dir_out))
# logwarn = []
# else:
# logwarn = ("output directoy already exists, " +
# "error in function safename()")
# copy files to output-directory
src_out = dir_out / "src"
if not src_out.is_dir():
src_out.mkdir()
print(f"Created folder '{src_out}'")
if not dependencies:
dependencies = []
dependencies.append(thisfile)
for filename in dependencies:
# path, fname = os.path.split(filename)
# name, ext = os.path.splitext(fname)
# path = filename.parent
filename = Path(filename).resolve()
name = filename.stem
suffix = filename.suffix
if prefix_file:
# name = f"{prefix_file}_{name}"
name = "{}_{}".format(prefix_file, name)
# thatfile = os.path.join(
# dir_out, name + now_str() + ext)
# thatfile = dir_out / f"{name}_{now_str()}{suffix}"
thatfile = src_out / "{}_{}{}".format(name, now_str(), suffix)
thatfile = safename(thatfile, 'file')
# TODO: Replace this with a proper pathlib method once?
# And remove the 'str()' once Raspbian is n Python 3.6..
shutil.copy2(str(filename), str(thatfile))
# this_split = os.path.splitext(thisfile)
# thatfile = os.path.join(
# dir_out, this_split[0] + now_str() + this_split[1])
# thatfile = safename(thatfile, 'file')
# shutil.copy2(thisfile, thatfile)
return dir_out, thisname # , logwarn
def setup_logging(
thisname,
args,
dir_log=None,
max_bytes=LOG_MAX_BYTES,
backup_count=LOG_BACKUP_COUNT,
):
"""Set up the logging module to log to a file.
Rotate logfiles if they are bigger than LOG_MAX_BYTES.
https://docs.python.org/3/howto/logging-cookbook.html
"""
err_msg = []
if dir_log is None:
# dir_log = os.path.join(os.getcwd(), "DIR_LOG")
dir_log = Path.cwd() / "log"
dir_log = safename(dir_log, 'dir')
if not dir_log.is_dir():
try:
dir_log.mkdir(parents=False)
except Exception as err:
# err_msg.append(
# f"Failed to create directory {dir_log}\n" +
# f"Error: {err}\n" +
# "Now creating full path...")
err_msg.append((
"Failed to create directory {}\n" +
"Error: {}\n" +
"Now creating full path...").format(dir_log, err))
dir_log.mkdir(parents=True)
# log_path = os.path.join(LOC_PATH, "logs")
# thisfile = os.path.basename(__file__)
# logfile = safename(os.path.join(
# dir_log, "{}_{}.log".format(thisname, now_str())), 'file')
logfile = safename(
# (dir_log / f"{thisname}_{now_str()}.log"), 'file')
(dir_log / "{}_{}.log".format(thisname, now_str())), 'file')
# logfile = safename(logfile, 'file')
# logname = thisfile[0:-3] + '.log' # + now_str() + '.log'
if args.debug:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
# logging.basicConfig(
# level=loglevel,
# format=("%(asctime)s %(levelname)-8s " +
# "%(funcName)-12s: %(message)s"),
# datefmt='%y-%m-%d %H:%M:%S UTC',
# filename=logfile,
# filemode='a')
# # logging.basicConfig(filename=logfile, level=logging.INFO)
# logging.debug("logging to file {}".format(logfile))
# Set level
logging.getLogger('').setLevel(loglevel)
# All times in UTC
logging.Formatter.converter = time.gmtime
# format=('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
# Rotating logs
# https://docs.python.org/2/howto/
# logging-cookbook.html#using-file-rotation
# Add the log message handler to the logger
# TODO: Remove the "str()" once RPIs have Python3.6
rotater = logging.handlers.RotatingFileHandler(
str(logfile),
mode='a',
maxBytes=max_bytes,
backupCount=backup_count)
# encoding=None,
# delay=0)
# rotater.setLevel(loglevel)
rotate_formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)-8s %(funcName)-12s: %(message)s",
datefmt='%y-%m-%d %H:%M:%S UTC')
rotater.setFormatter(rotate_formatter)
logging.getLogger('').addHandler(rotater)
# if not cron:
# Define a Handler which writes INFO messages or
# higher to the sys.stderr
console = logging.StreamHandler()
# console.setLevel(loglevel) # (logging.INFO)
# Set a format which is simpler for console use
# formatter = logging.Formatter(
# '%(name)-12s: %(levelname)-8s %(message)s')
console_formatter = logging.Formatter(
"%(levelname)-8s: %(message)s")
# Tell the handler to use this format
console.setFormatter(console_formatter)
# Add the handler to the root logger
logging.getLogger('').addHandler(console)
if len(err_msg) > 0:
for msg in err_msg:
logging.warning(msg)
logging.debug("Logging to screen and to {}".format(logfile))
# return dir_log
def select_files(title="select file(s)", dir_ini=None,
filetypes=[("all files", "*")],
more=False):
"""Interactively pick a file (actually its path-string).
If 'more=True', a tuple of files will be returned.
see:
http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/tkFileDialog.html
http://www.programcreek.com/python/example/4281/
tkFileDialog.askopenfilename
http://effbot.org/tkinterbook/tkinter-file-dialogs.htm
Not mentioned in the above refs is ".askopenfilenames()",
which takes the same options but returns a tuple of
selected files.
>> dir(filedialog)
['ACTIVE', 'ALL', 'ANCHOR', 'ARC', 'BASELINE', 'BEVEL', 'BOTH',
'BOTTOM', 'BROWSE', 'BUTT', 'BaseWidget', 'BitmapImage', 'BooleanVar',
'Button', 'CASCADE', 'CENTER', 'CHAR', 'CHECKBUTTON', 'CHORD', 'COMMAND',
'CURRENT', 'CallWrapper', 'Canvas', 'Checkbutton', 'DISABLED', 'DOTBOX',
'Dialog', 'Directory', 'DoubleVar',
'E', 'END', 'EW', 'EXCEPTION', 'EXTENDED', 'Entry', 'Event', 'EventType',
'FALSE', 'FIRST', 'FLAT', 'FileDialog', 'Frame', 'GROOVE', 'Grid',
'HIDDEN', 'HORIZONTAL', 'INSERT', 'INSIDE', 'Image', 'IntVar',
'LAST', 'LEFT', 'Label', 'LabelFrame', 'Listbox', 'LoadFileDialog',
'MITER', 'MOVETO', 'MULTIPLE', 'Menu', 'Menubutton', 'Message',
'Misc', 'N', 'NE', 'NO', 'NONE', 'NORMAL', 'NS', 'NSEW, 'NUMERIC',
'NW', 'NoDefaultRoot', 'OFF', 'ON', 'OUTSIDE', 'Open', 'OptionMenu',
'PAGES', 'PIESLICE', 'PROJECTING', 'Pack', 'PanedWindow', 'PhotoImage',
'Place', 'RADIOBUTTON', 'RAISED', 'READABLE', 'RIDGE', 'RIGHT', 'ROUND',
'Radiobutton', 'S', 'SCROLL', 'SE', 'SEL', 'SEL_FIRST', 'SEL_LAST',
'SEPARATOR', 'SINGLE', 'SOLID', 'SUNKEN', 'SW',
'SaveAs', 'SaveFileDialog', 'Scale', 'Scrollbar', 'Spinbox', 'StringVar',
'TOP', 'TRUE', 'Tcl', 'TclError', 'TclVersion', 'Text', 'Tk', 'TkVersion',
'Toplevel', 'UNDERLINE', 'UNITS', 'VERTICAL', 'Variable', 'W', 'WORD',
'WRITABLE', 'Widget', 'Wm', 'X', 'XView', 'Y', 'YES', 'YView', '_Dialog',
'__builtins__', '__cached__', '__doc__', '__file__', '__loader__',
'__name__', '__package__', '__spec__',
'askdirectory', 'askopenfile', 'askopenfilename', 'askopenfilenames',
'askopenfiles', 'asksaveasfile', 'asksaveasfilename', 'commondialog',
'constants', 'dialogstates', 'enum', 'fnmatch',
'getboolean', 'getdouble', 'getint', 'image_names', 'image_types',
'mainloop', 'os', 're', 'sys', 'test', 'wantobjects']
"""
# Resolve initial directory
if not dir_ini: # or not dir_ini.is_dir():
dir_ini = Path.cwd()
else:
dir_ini = Path(dir_ini).resolve()
# Include this to make the crappy empty window go away
root = Tk()
root.withdraw()
print(f"tkinter version: {tkinter.TkVersion}")
# Set options
opts = {}
opts["parent"] = root
opts["title"] = title
opts["initialdir"] = dir_ini
opts['filetypes'] = filetypes
# Check whether single file or tuple of files is requested
if more:
# tuple of full filenames (paths)
# ffn_return = tkFileDialog.askopenfilenames(**opts)
ffn_return = filedialog.askopenfilenames(**opts)
if len(ffn_return) > 0:
ffn_return = [Path(ffn) for ffn in ffn_return]
else:
# String of full filename (path)
# ffn_return = tkFileDialog.askopenfilename(**opts)
ffn_return = filedialog.askopenfilename(**opts)
if ffn_return:
ffn_return = Path(ffn_return)
# If cancelled, return None
if not ffn_return:
return None
# Return full filename(s)
return ffn_return
def select_directory(title="select directory", dir_ini=None):
"""Interactively retrieve the path to a directory."""
# include this to make the crappy empty window go away
root = Tk()
root.withdraw()
print(f"tkinter version: {tkinter.TkVersion}")
# open directory dialog
# dir_select = tkFileDialog.askdirectory(
dir_select = filedialog.askdirectory(
parent=root,
title=title,
initialdir=dir_ini)
# check cancel or false directoy
if not dir_select:
print("Cancelled by user, returning 'None'")
return None
else:
dir_select = Path(dir_select)
if not dir_select.is_dir():
print(f"Directory '{dir_select}' doesn't exist, returning 'None'")
return None
# return full path of selected diretory
return dir_select
def main():
"""Mock main-function.
Write test cases here.
"""
# setup environment
# thisfile = os.path.basename(__file__)
# dir_out = setup_environment(thisfile, postfix=postfix)
pass
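# A sketch of typical usage (names are illustrative; args must expose a boolean
# .debug attribute for setup_logging):
#     dir_out, thisname = setup_environment(__file__, postfix_dir="demo")
#     setup_logging(thisname, args, dir_log=dir_out / "log")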
if __name__ == "__main__":
main()
|
[
"daniel.hofstadler@uni-graz.at"
] |
daniel.hofstadler@uni-graz.at
|
0f537880f6377398a6e8bee99458346243422035
|
3eab5590af67374ac8fab14111c3c9f62da3b809
|
/aes-tests.py
|
c86125adb3d0b0671a05954e0875255f56fca8a5
|
[] |
no_license
|
ungear/cardinal
|
497ed16a0a348155a175d4efc510c3e72d0b5e9b
|
448ce9282d0fcf665fb5c49083367a8c4888c813
|
refs/heads/master
| 2021-09-04T02:08:32.286779
| 2018-01-14T12:37:26
| 2018-01-14T12:37:26
| 114,804,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,915
|
py
|
import aes
import unittest
TEST_MESSAGE_BLOCK = [0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70]
TEST_KEY_BYTES = [0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c]
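# TEST_KEY_BYTES is the AES-128 cipher key from the FIPS-197 key-expansion
# example (2b7e1516 28aed2a6 abf71588 09cf4f3c); TEST_MESSAGE_BLOCK is simply
# the ASCII bytes of "abcdefghijklmnop".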
class TestKeyExpansion(unittest.TestCase):
def testKeyScheduleLastKey(self):
keySchedule = aes.keyExpansion(TEST_KEY_BYTES)
lastKey = keySchedule[len(keySchedule)-1]
self.assertEqual(lastKey, 0xb6630ca6)
def testKeyScheduleLength(self):
keySchedule = aes.keyExpansion(TEST_KEY_BYTES)
self.assertEqual(len(keySchedule), 44)
def testKeyScheduleException(self):
with self.assertRaises(ValueError):
aes.keyExpansion(TEST_KEY_BYTES[:10:])
class TestCreateWord(unittest.TestCase):
def testWord(self):
self.assertEqual(aes.createWord(0xa1, 0x11, 0x3b, 0x59), 0xa1113b59)
class TestRotWord(unittest.TestCase):
def testWord(self):
self.assertEqual(aes.rotWord(0xa13c3b59), 0x3c3b59a1)
class TestSubWord(unittest.TestCase):
def testWord(self):
self.assertEqual(aes.subWord(0xa13c3b59), 0x32ebe2cb)
class TestCreateState(unittest.TestCase):
def testState(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedState = [
TEST_MESSAGE_BLOCK[0::4],
TEST_MESSAGE_BLOCK[1::4],
TEST_MESSAGE_BLOCK[2::4],
TEST_MESSAGE_BLOCK[3::4],
]
self.assertEqual(state, expectedState)
class TestSubBytes(unittest.TestCase):
def testSubBytes(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51]
expectedMutatedState = aes.createState(expectedBytes)
aes.subBytes(state)
self.assertEqual(state, expectedMutatedState)
class TestInvSubBytes(unittest.TestCase):
def testSubBytes(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0]
expectedMutatedState = aes.createState(expectedBytes)
aes.invSubBytes(state)
self.assertEqual(state, expectedMutatedState)
class TestShiftRows(unittest.TestCase):
def testShiftRows(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0x61, 0x66, 0x6b, 0x70, 0x65, 0x6a, 0x6f, 0x64, 0x69, 0x6e, 0x63, 0x68, 0x6d, 0x62, 0x67, 0x6c]
expectedState = aes.createState(expectedBytes)
aes.shiftRows(state)
self.assertEqual(state, expectedState)
class TestInvShiftRows(unittest.TestCase):
def testInvShiftRows(self):
state = aes.createState(TEST_MESSAGE_BLOCK)
expectedBytes = [0x61, 0x6e, 0x6b, 0x68, 0x65, 0x62, 0x6f, 0x6c, 0x69, 0x66, 0x63, 0x70, 0x6d, 0x6a, 0x67, 0x64]
expectedState = aes.createState(expectedBytes)
aes.invShiftRows(state)
self.assertEqual(state, expectedState)
class TestMixColumns(unittest.TestCase):
def testMixColumns(self):
originalBytes = [0xd4, 0xbf, 0x5d, 0x30, 0xe0, 0xb4, 0x52, 0xae, 0xb8, 0x41, 0x11, 0xf1, 0x1e, 0x27, 0x98, 0xe5]
expectedBytes = [0x04, 0x66, 0x81, 0xe5, 0xe0, 0xcb, 0x19, 0x9a, 0x48, 0xf8, 0xd3, 0x7a, 0x28, 0x06, 0x26, 0x4c]
state = aes.createState(originalBytes)
expectedState = aes.createState(expectedBytes)
aes.mixColumns(state)
self.assertEqual(state, expectedState)
class TestInvMixColumns(unittest.TestCase):
def testInvMixColumns(self):
originalBytes = [0x04, 0x66, 0x81, 0xe5, 0xe0, 0xcb, 0x19, 0x9a, 0x48, 0xf8, 0xd3, 0x7a, 0x28, 0x06, 0x26, 0x4c]
expectedBytes = [0xd4, 0xbf, 0x5d, 0x30, 0xe0, 0xb4, 0x52, 0xae, 0xb8, 0x41, 0x11, 0xf1, 0x1e, 0x27, 0x98, 0xe5]
state = aes.createState(originalBytes)
expectedState = aes.createState(expectedBytes)
aes.invMixColumns(state)
self.assertEqual(state, expectedState)
class TestCipher(unittest.TestCase):
def testCipher(self):
inputBytes = [0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34]
exampleCypherKeyBytes = [0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c]
expectedResultBytes = [0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32]
inputState = aes.createState(inputBytes)
expectedState = aes.createState(expectedResultBytes)
keySchedule = aes.keyExpansion(exampleCypherKeyBytes)
result = aes.cipher(inputState, keySchedule)
self.assertEqual(result, expectedState)
def testCipher2(self):
inputBytes = [0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff]
exampleCypherKeyBytes = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]
expectedResultBytes = [0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30, 0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a]
inputState = aes.createState(inputBytes)
expectedState = aes.createState(expectedResultBytes)
keySchedule = aes.keyExpansion(exampleCypherKeyBytes)
result = aes.cipher(inputState, keySchedule)
self.assertEqual(result, expectedState)
class TestInvCipher(unittest.TestCase):
def testInvCipher(self):
inputBytes = [0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30, 0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a]
exampleCypherKeyBytes = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]
expectedResultBytes = [0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff]
inputState = aes.createState(inputBytes)
expectedState = aes.createState(expectedResultBytes)
keySchedule = aes.keyExpansion(exampleCypherKeyBytes)
result = aes.invCipher(inputState, keySchedule)
self.assertEqual(result, expectedState)
class TestTheWholeProcess(unittest.TestCase):
def testEncriptDecript(self):
plainText = 'idjpi23j023uc0j1-0i-soxl=kixq[wkz=21ks[qqwdqwd'
password = 'dke8qpend'
encodedText = aes.encode(password, plainText)
decodedText = aes.decode(password, encodedText)
self.assertEqual(decodedText, plainText)
class TestGetPasswordHash(unittest.TestCase):
def testHashLength(self):
password7 = '1234567'
password20 = '0123456789abcdef0123'
hash7 = aes.getPasswordHash(password7)
hash20 = aes.getPasswordHash(password20)
self.assertEqual(len(hash7), 16)
self.assertEqual(len(hash20), 16)
if __name__ == '__main__':
unittest.main()
|
[
"redeemer@inbox.ru"
] |
redeemer@inbox.ru
|
860ed367c2aca7e4dd6deba69f2855fdacc19f00
|
f49c866f416a0c2cf89347aa2e0814553f4b5d52
|
/train_eval_for_mrc.py
|
6c3a3cc2610e4fbcbbd86c58408dec97802e41b3
|
[] |
no_license
|
thunderboom/NER_MRC
|
3fec4a4a821592fe9d092ac2b3b9b167dee25cb5
|
a0f6a9a77ed7ed5d6ff4d46d114b83871480b1e7
|
refs/heads/main
| 2022-12-31T17:06:08.782889
| 2020-10-27T10:04:31
| 2020-10-27T10:04:31
| 307,647,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,300
|
py
|
# coding: UTF-8
import os
import logging
import numpy as np
import torch
import torch.nn as nn
from sklearn import metrics
from seqeval.metrics import f1_score, precision_score, recall_score, classification_report
import time
from utils import extract_flat_spans_batch
from models.loss import DiceLoss
from torch.nn.modules import BCEWithLogitsLoss
from transformers import AdamW, get_linear_schedule_with_warmup
logger = logging.getLogger(__name__)
def compute_loss(config, start_logits, end_logits, span_logits,
start_labels, end_labels, match_labels, start_label_mask, end_label_mask):
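    """Compute start-, end- and span-match losses for MRC-style span extraction.

    start_logits/end_logits are per-token binary logits of shape [batch, seq_len];
    span_logits scores every (start, end) pair with shape [batch, seq_len, seq_len].
    The label masks zero out padding so it does not contribute to any loss.
    """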
batch_size, seq_len = start_logits.size()
start_float_label_mask = start_label_mask.view(-1).float()
end_float_label_mask = end_label_mask.view(-1).float()
match_label_row_mask = start_label_mask.bool().unsqueeze(-1).expand(-1, -1, seq_len)
match_label_col_mask = end_label_mask.bool().unsqueeze(-2).expand(-1, seq_len, -1)
match_label_mask = match_label_row_mask & match_label_col_mask
    match_label_mask = torch.triu(match_label_mask, 0)  # start must be less than or equal to end
if config.span_loss_candidates == "all":
# naive mask
float_match_label_mask = match_label_mask.view(batch_size, -1).float()
else:
# use only pred or golden start/end to compute match loss
start_preds = start_logits > 0
end_preds = end_logits > 0
if config.span_loss_candidates == "gold":
match_candidates = ((start_labels.unsqueeze(-1).expand(-1, -1, seq_len) > 0)
& (end_labels.unsqueeze(-2).expand(-1, seq_len, -1) > 0))
else:
match_candidates = torch.logical_or(
(start_preds.unsqueeze(-1).expand(-1, -1, seq_len)
& end_preds.unsqueeze(-2).expand(-1, seq_len, -1)),
(start_labels.unsqueeze(-1).expand(-1, -1, seq_len)
& end_labels.unsqueeze(-2).expand(-1, seq_len, -1))
)
match_label_mask = match_label_mask & match_candidates
float_match_label_mask = match_label_mask.view(batch_size, -1).float()
if config.loss_type == "bce":
bce_loss = BCEWithLogitsLoss(reduction="none")
start_loss = bce_loss(start_logits.view(-1), start_labels.view(-1).float())
start_loss = (start_loss * start_float_label_mask).sum() / start_float_label_mask.sum()
end_loss = bce_loss(end_logits.view(-1), end_labels.view(-1).float())
end_loss = (end_loss * end_float_label_mask).sum() / end_float_label_mask.sum()
match_loss = bce_loss(span_logits.view(batch_size, -1), match_labels.view(batch_size, -1).float())
match_loss = match_loss * float_match_label_mask
match_loss = match_loss.sum() / (float_match_label_mask.sum() + 1e-10)
else:
dice_loss = DiceLoss(with_logits=True, smooth=config.dice_smooth)
start_loss = dice_loss(start_logits, start_labels.float(), start_float_label_mask)
end_loss = dice_loss(end_logits, end_labels.float(), end_float_label_mask)
match_loss = dice_loss(span_logits, match_labels.float(), float_match_label_mask)
return start_loss, end_loss, match_loss
def model_train(config, model, train_iter, dev_iter):
start_time = time.time()
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": config.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
]
t_total = len(train_iter) * config.num_train_epochs
optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=t_total * config.warmup_proportion, num_training_steps=t_total
)
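    # linear warmup over the first warmup_proportion of t_total steps, then linear decay to zero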
    # FocalLoss(gamma=2, alpha=1)  # tune gamma over 0, 1, 2, 3
# Train!
logger.info("***** Running training *****")
logger.info(" Train Num examples = %d", len(train_iter))
logger.info(" Dev Num examples = %d", len(dev_iter))
logger.info(" Num Epochs = %d", config.num_train_epochs)
logger.info(" Instantaneous batch size GPU/CPU = %d", config.batch_size)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Train device:%s, id:%d", config.device, config.device_id)
    global_batch = 0  # number of batches processed so far
dev_best_loss = float('inf')
    last_improve = 0  # batch at which the dev loss last improved
    flag = False  # set to True once training has stalled for too long
predict_all = np.array([], dtype=int)
labels_all = np.array([], dtype=int)
total_loss = 0.
for epoch in range(config.num_train_epochs):
logger.info('Epoch [{}/{}]'.format(epoch + 1, config.num_train_epochs))
        # NOTE: the scheduler is stepped once per batch inside the inner loop; an extra
        # per-epoch step here would decay the learning rate twice.
for i, (_, input_ids, attention_mask, token_type_ids, type_start_labels, type_end_labels,
start_label_mask, end_label_mask, match_labels, type_) in enumerate(train_iter):
global_batch += 1
model.train()
input_ids = torch.tensor(input_ids).type(torch.LongTensor).to(config.device)
attention_mask = torch.tensor(attention_mask).type(torch.LongTensor).to(config.device)
token_type_ids = torch.tensor(token_type_ids).type(torch.LongTensor).to(config.device)
type_start_labels = torch.tensor(type_start_labels).type(torch.LongTensor).to(config.device)
type_end_labels = torch.tensor(type_end_labels).type(torch.LongTensor).to(config.device)
start_label_mask = torch.tensor(start_label_mask).type(torch.LongTensor).to(config.device)
end_label_mask = torch.tensor(end_label_mask).type(torch.LongTensor).to(config.device)
match_labels = torch.tensor(match_labels).type(torch.LongTensor).to(config.device)
# model output
start_logits, end_logits, span_logits = model(input_ids, attention_mask, token_type_ids)
start_loss, end_loss, match_loss = compute_loss(
config=config,
start_logits=start_logits,
end_logits=end_logits,
span_logits=span_logits,
start_labels=type_start_labels,
end_labels=type_end_labels,
match_labels=match_labels,
start_label_mask=start_label_mask,
end_label_mask=end_label_mask
)
loss = config.weight_start * start_loss + config.weight_end * end_loss + config.weight_span * match_loss
model.zero_grad()
            total_loss += loss.item()  # use .item() so the running total does not retain the autograd graph
loss.backward()
optimizer.step()
scheduler.step() # Update learning rate schedule
# [B, length], [B, length], [B, length, length]
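            # a logit > 0 is equivalent to sigmoid(logit) > 0.5, i.e. a positive prediction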
start_preds, end_preds, span_pred = start_logits > 0, end_logits > 0, span_logits>0
active_labels = extract_flat_spans_batch(start_pred=type_start_labels,
end_pred=type_end_labels,
match_pred=match_labels,
label_mask=start_label_mask,
pseudo_tag=type_
)
predic = extract_flat_spans_batch(start_pred=start_preds,
end_pred=end_preds,
match_pred=span_pred,
label_mask=start_label_mask,
pseudo_tag=type_
)
labels_all = np.append(labels_all, active_labels)
predict_all = np.append(predict_all, predic)
if global_batch % config.output == 0:
                # log metrics on the train and dev sets every config.output batches
true_label = labels_all
predict_label = predict_all
train_acc = metrics.accuracy_score(labels_all, predict_all)
train_precision = precision_score(true_label, predict_label)
train_recall = recall_score(true_label, predict_label)
train_f1 = f1_score(true_label, predict_label)
predict_all = np.array([], dtype=int)
labels_all = np.array([], dtype=int)
acc, precision, recall, f1, dev_loss = model_evaluate(config, model, dev_iter)
if dev_loss < dev_best_loss:
dev_best_loss = dev_loss
improve = '*'
last_improve = global_batch
else:
improve = ''
time_dif = time.time() - start_time
msg = '{0:>6}, Train Loss: {1:>.4f}, train_acc: {2:>.2%}, precision: {3:>.2%}, recall: {4:>.2%}, f1: {5:>.2%}' \
' Val Loss: {6:>5.6f}, acc: {7:>.2%}, precision: {8:>.2%}, recall: {9:>.2%}, f1: {10:>.2%}, ' \
' Time: {11} - {12}'
logger.info(msg.format(global_batch, total_loss / config.output, train_acc, train_precision, train_recall, train_f1,
dev_loss, acc, precision, recall, f1, time_dif, improve))
total_loss = 0.
if config.early_stop and global_batch - last_improve > config.require_improvement:
                # dev loss has not improved for config.require_improvement batches; stop training
logger.info("No optimization for a long time, auto-stopping...")
flag = True
break
if flag:
break
def model_evaluate(config, model, data_iter, test=False):
model.eval()
loss_total = 0
predict_all = np.array([], dtype=int)
labels_all = np.array([], dtype=int)
with torch.no_grad():
for i, (_, input_ids, attention_mask, token_type_ids, type_start_labels, type_end_labels,
start_label_mask, end_label_mask, match_labels, type_) in enumerate(data_iter):
input_ids = torch.tensor(input_ids).type(torch.LongTensor).to(config.device)
attention_mask = torch.tensor(attention_mask).type(torch.LongTensor).to(config.device)
token_type_ids = torch.tensor(token_type_ids).type(torch.LongTensor).to(config.device)
type_start_labels = torch.tensor(type_start_labels).type(torch.LongTensor).to(config.device)
type_end_labels = torch.tensor(type_end_labels).type(torch.LongTensor).to(config.device)
start_label_mask = torch.tensor(start_label_mask).type(torch.LongTensor).to(config.device)
end_label_mask = torch.tensor(end_label_mask).type(torch.LongTensor).to(config.device)
match_labels = torch.tensor(match_labels).type(torch.LongTensor).to(config.device)
# model output
start_logits, end_logits, span_logits = model(input_ids, attention_mask, token_type_ids)
start_loss, end_loss, match_loss = compute_loss(
config=config,
start_logits=start_logits,
end_logits=end_logits,
span_logits=span_logits,
start_labels=type_start_labels,
end_labels=type_end_labels,
match_labels=match_labels,
start_label_mask=start_label_mask,
end_label_mask=end_label_mask
)
loss = config.weight_start * start_loss + config.weight_end * end_loss + config.weight_span * match_loss
            loss_total += loss.item()
# [B, length], [B, length], [B, length, length]
start_preds, end_preds, span_pred = start_logits > 0, end_logits > 0, span_logits>0
active_labels = extract_flat_spans_batch(start_pred=type_start_labels,
end_pred=type_end_labels,
match_pred=match_labels,
label_mask=start_label_mask,
pseudo_tag=type_
)
predic = extract_flat_spans_batch(start_pred=start_preds,
end_pred=end_preds,
match_pred=span_pred,
label_mask=start_label_mask,
pseudo_tag=type_
)
labels_all = np.append(labels_all, active_labels)
predict_all = np.append(predict_all, predic)
true_label = labels_all
predict_label = predict_all
acc = metrics.accuracy_score(labels_all, predict_all)
precision = precision_score(true_label, predict_label)
recall = recall_score(true_label, predict_label)
f1 = f1_score(true_label, predict_label)
if test:
report = classification_report(true_label, predict_label, digits=4)
confusion = metrics.confusion_matrix(true_label, predict_label)
return acc, precision, recall, f1, loss_total / len(data_iter), report, confusion
return acc, precision, recall, f1, loss_total / len(data_iter)
def model_test(config, model, test_iter):
# test!
logger.info("***** Running testing *****")
logger.info(" Test Num examples = %d", len(test_iter))
start_time = time.time()
acc, precision, recall, f1, test_loss, test_report, test_confusion = model_evaluate(config, model, test_iter, test=True)
msg = 'Test Loss: {0:>5.4f}, Test acc: {1:>.2%}, precision: {2:>.2%} recall: {3:>.2%}, f1: {4:>.2%}'
logger.info(msg.format(test_loss, acc, precision, recall, f1))
logger.info("Precision, Recall and F1-Score...")
logger.info(test_report)
logger.info("Confusion Matrix...")
logger.info(test_confusion)
time_dif = time.time() - start_time
logger.info("Time usage:%.6fs", time_dif)
|
[
"470810634@qq.com"
] |
470810634@qq.com
|
4199440910460a422c013a0c40e9ecddfe383267
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnbelt.py
|
a561c79d95be99afc054e24528da2a296c42f2e6
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
ii = [('LyelCPG2.py', 1), ('MarrFDI.py', 1), ('CoolWHM2.py', 2), ('KembFFF.py', 1), ('RogePAV.py', 4), ('RennJIT.py', 1), ('LeakWTI2.py', 1), ('LeakWTI3.py', 1), ('PettTHE.py', 1), ('TennAP.py', 1), ('PeckJNG.py', 1), ('BailJD2.py', 1), ('FitzRNS3.py', 2), ('WilkJMC2.py', 3), ('CarlTFR.py', 4), ('LyttELD.py', 1), ('BailJD1.py', 1), ('RoscTTI2.py', 1), ('CrokTPS.py', 1), ('ClarGE.py', 2), ('BuckWGM.py', 2), ('LyelCPG.py', 3), ('GilmCRS.py', 1), ('WestJIT2.py', 1), ('DibdTRL2.py', 1), ('CrocDNL.py', 3), ('FerrSDO2.py', 1), ('LeakWTI.py', 1), ('BachARE.py', 1), ('MereHHB3.py', 1), ('HowiWRL2.py', 3), ('BailJD3.py', 1), ('WilkJMC.py', 5), ('MartHRW.py', 4), ('MackCNH.py', 1), ('FitzRNS4.py', 5), ('CoolWHM3.py', 1), ('DequTKM.py', 5), ('FitzRNS.py', 3), ('BowrJMM.py', 1), ('LyttELD3.py', 1), ('RoscTTI.py', 2), ('LewiMJW.py', 1), ('JacoWHI2.py', 1), ('SomeMMH.py', 2), ('BrewDTO.py', 2), ('RogeSIP.py', 6), ('MartHRW2.py', 3), ('MartHSI.py', 3), ('DwigTHH.py', 1), ('BowrJMM2.py', 1), ('BowrJMM3.py', 2), ('BeckWRE.py', 1), ('KeigTSS.py', 1), ('HowiWRL.py', 1)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
fb7e3fc360eec3bf7e5029668cb7c5c927013bc2
|
aa43cbcef5414f240e72b6840b1acc462dccc528
|
/Functions exercises/18.py
|
147de8be9382b2833c8ca80cfe2d2ce048cf7bce
|
[] |
no_license
|
Jeevan5955/Python
|
730a7fd70eb9719e59173231a0530946010da45b
|
7cd46e46787d5ee51524a551f68975c4c9806e48
|
refs/heads/master
| 2022-11-20T11:43:56.586066
| 2020-07-27T05:51:38
| 2020-07-27T05:51:38
| 278,003,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
def count_primes(num):
primes = [2]
x = 3
if num < 2: # for the case of num = 0 or 1
return 0
while x <= num:
for y in range(3,x,2): # test all odd factors up to x-1
if x%y == 0:
x += 2
break
else:
primes.append(x)
x += 2
print(primes)
return len(primes)
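
# Example: count_primes(100) prints the list of the 25 primes up to 100 and returns 25.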
|
[
"noreply@github.com"
] |
noreply@github.com
|
9906f5196c28ef9676151c8ed0c701f327159f25
|
96afce5cdd9c636f066830aa41d4eb9fce1a42d1
|
/pull_list_of_books.py
|
ffd62cd7061bac27e5b98af8e110f5dcf60eee5b
|
[] |
no_license
|
davehedengren/authors
|
d588026f8d29ac96f204bb89d196268ae9fb1faf
|
94c8a288ad5ea6c59313ce4a609ea7b0bc92e1b7
|
refs/heads/master
| 2016-09-06T15:19:03.836097
| 2014-09-20T02:15:36
| 2014-09-20T02:15:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
import requests
import xml.etree.ElementTree as ET

# my_key (Goodreads API key), author_id and pg (page number) are assumed to be
# defined earlier by the surrounding script.
books = requests.get('https://www.goodreads.com/author/list.xml', params={'key': my_key, 'id': author_id, 'page': pg})
books2 = ET.fromstring(books.content)
fields=[0,4,13,14,15,19]
for book in books2[1][3]:
x = unicode(str(author_id) + '|' + str(books2[1][1].text) + '|','utf-8','ignore')
for f in fields:
y = unicode(book[f].text)
x += y +'|'
print x
|
[
"davehedengren@gmail.com"
] |
davehedengren@gmail.com
|
5cf35eeab105fc35d939285aa6aaed87c88a0b92
|
a91b9cc658421d078520f73320bd4551b74ed51f
|
/dev3/demo3/models.py
|
cb564a1b90f3dcea0ac7262e543cc3dadbf0c4a0
|
[] |
no_license
|
2519325088/dev3
|
e0ea12a54a36dc722747dc693974a0ccd11a5bd9
|
847a49542c2612d7fc879438d65a542158c93f12
|
refs/heads/master
| 2020-05-15T02:19:58.605565
| 2019-04-19T03:19:56
| 2019-04-19T03:19:56
| 182,047,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
from django.db import models
# Create your models here.
class Problem(models.Model):
pname=models.CharField(max_length=200)
def __str__(self):
return self.pname
class Option(models.Model):
oname=models.CharField(max_length=100)
oshu=models.IntegerField(default=0)
pid=models.ForeignKey('Problem',on_delete=models.CASCADE)
def __str__(self):
return self.oname
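

# Minimal usage sketch (hypothetical data; assumes migrations are applied):
#   p = Problem.objects.create(pname='Which option do you prefer?')
#   Option.objects.create(oname='Option A', pid=p)  # oshu (the vote count) defaults to 0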
|
[
"2519325088@qq.com"
] |
2519325088@qq.com
|
5038904bb1fd03747bf1c26a2daa2a87b5a5bcd8
|
fee88a67d4706bddb8999ce2701315c5f62f6e78
|
/onmt/modules/extensions/mlp/mlp_gelu.py
|
a1a1d998fa59cc0685f65e54bba7b2fe97e1aee0
|
[
"MIT"
] |
permissive
|
Dan-hbd/NMTGMinor
|
5cade7d3b6de83cc45a618ab59420274bcd86f15
|
84e59ac8391ee78852d7c71afc60c3c8b8e3d44d
|
refs/heads/master
| 2023-05-30T16:22:58.148920
| 2021-06-15T14:28:48
| 2021-06-15T14:28:48
| 372,408,488
| 0
| 0
|
NOASSERTION
| 2021-05-31T06:44:22
| 2021-05-31T06:44:22
| null |
UTF-8
|
Python
| false
| false
| 15,164
|
py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
import silu_cuda
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from ..optimized.compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
    import fused_mlp_gelu
except (ModuleNotFoundError, ImportError) as e:
    fused_mlp_gelu = None

try:
    import fused_mlp_silu  # used by MlpSiluFunction below but missing from the original imports
except (ModuleNotFoundError, ImportError) as e:
    fused_mlp_silu = None
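
# Each fused_mlp_* module is an optional pre-built CUDA extension; any that fails
# to import is left as None and the matching wrapper below stays disabled.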
#
# class MlpReluFunction(torch.autograd.Function):
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx, activation, *args):
# output = fused_mlp.forward(args)
# ctx.save_for_backward(*args)
# ctx.outputs = output
# return output[0]
#
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_o):
# grads = fused_mlp.backward(grad_o, ctx.outputs, ctx.saved_tensors)
# del ctx.outputs
# return (None, *grads)
#
#
class MlpReluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
output = fused_mlp_relu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = output
dropout_mask = output[-1]
ctx.p = p
return output[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpSiluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_silu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_silu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpAGeLUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_agelu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_agelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpGeLUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_gelu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_gelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
if fused_mlp_agelu:
mlp_agelu_function = half_function(MlpAGeLUFunction.apply)
else:
mlp_agelu_function = None
if fused_mlp_gelu:
mlp_gelu_function = half_function(MlpGeLUFunction.apply)
else:
mlp_gelu_function = None
class SwishFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, inp):
ctx.save_for_backward(inp)
return silu_cuda.forward(inp)
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
inp, = ctx.saved_tensors
if not ctx.needs_input_grad[0]: return (None,)
return silu_cuda.backward(inp, grad_out)
def fast_silu(input):
return SwishFunction.apply(input)
class FastSiLU(torch.nn.Module):
def forward(self, input):
return fast_silu(input)
class AGELUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x):
ctx.save_for_backward(x)
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
x, = ctx.saved_tensors
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
BACKCOEFF = 0.1070322243
tanh_outf = torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3)))
retf = 0.5 * x * (1.0 - torch.pow(tanh_outf, 2)) * (SQRT_M2_PI + BACKCOEFF * torch.pow(x, 2)) + 0.5 * (
1.0 + tanh_outf)
return grad_out * retf
class AGELU(torch.nn.Module):
def forward(self, input):
return AGELUFunction.apply(input)
def agelu(x):
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
BACKCOEFF = SQRT_M2_PI * COEFF * 3
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))
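
# agelu/agelu_backward implement the tanh approximation of GELU:
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
# where SQRT_M2_PI = 0.7978845608 is sqrt(2/pi).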
def agelu_backward(x, dy):
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
BACKCOEFF = 0.1070322243
tanh_outf = torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3)))
retf = 0.5 * x * (1.0 - torch.pow(tanh_outf, 2)) * (SQRT_M2_PI + BACKCOEFF * torch.pow(x, 2)) + 0.5 * (
1.0 + tanh_outf)
return dy * retf
if __name__ == '__main__':
class MLP(torch.nn.Module):
"""Launch MLP in C++
Args:
mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
bias (bool): Default True:
relu (bool): Default True
"""
def __init__(self, mlp_sizes, activation='gelu', dropout=0.25):
super(MLP, self).__init__()
self.num_layers = len(mlp_sizes) - 1
self.mlp_sizes = copy(mlp_sizes)
self.dropout = dropout
            if activation == 'relu':  # '==' rather than 'is': string identity is not guaranteed
                self.activation = 1
            elif activation == 'sigmoid':
                self.activation = 2
            elif activation == 'gelu':
                self.activation = 3
            else:
                raise TypeError("activation must be 'relu', 'sigmoid' or 'gelu'.")
self.weights = []
self.biases = []
for i in range(self.num_layers):
w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
self.weights.append(w)
name = 'weight_{}'.format(i)
setattr(self, name, w)
b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
self.biases.append(b)
name = 'bias_{}'.format(i)
setattr(self, name, b)
self.reset_parameters()
def reset_parameters(self):
for weight in self.weights:
dimsum = weight.size(0) + weight.size(1)
std = math.sqrt(2. / float(dimsum))
nn.init.normal_(weight, 0., std)
for bias in self.biases:
std = math.sqrt(1. / float(bias.size(0)))
nn.init.normal_(bias, 0., std)
def forward(self, input, mask=None, ref=False):
if ref:
return self.forward_ref(input, mask)
# return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
# return mlp_agelu_function(self.dropout, input, *self.weights, *self.biases)
return mlp_gelu_function(self.dropout, input, *self.weights, *self.biases)
def forward_ref(self, input, mask):
i = 0
output = input
for l in range(self.num_layers):
output = F.linear(output, self.weights[l], self.biases[l])
dropout_mask = mask[i:i + output.numel()]
pinv = 1 / (1 - self.dropout)
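                # inverted dropout: kept activations are scaled by 1/(1 - p) so the expected value matches eval mode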
if l < self.num_layers - 1:
# print(mask.size())
# output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
# output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
output = F.gelu(output) * dropout_mask.view(output.size(0), -1) * pinv
i += output.numel()
return output
def extra_repr(self):
# TODO add dropout probability
s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}"
return s
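
    # Hedged usage sketch (assumes the fused CUDA extensions above are importable):
    #   mlp = MLP([1024, 4096, 1024], activation='gelu').cuda().half()
    #   out, dropout_mask = mlp(torch.randn(8, 1024, device='cuda', dtype=torch.half))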
batch_size = 24568
mlp_sizes = [1024, 4096, 1024]
# mlp_sizes = [4, 7, 4]
num_iters = 10
class TestMLP(unittest.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_numeric(self):
mlp = MLP(mlp_sizes, activation='relu').cuda()
print(mlp)
ref_mlp = deepcopy(mlp)
for _ in range(1):
bsz = random.randint(2850, batch_size // 8) * 8
test_input = torch.empty(bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp.forward(ref_input, dropout_mask, ref=True)
print(dropout_mask.sum() / dropout_mask.numel())
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[0].grad.detach().cpu().numpy(),
ref_mlp.biases[0].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_with_bias(self):
for use_activation in ['relu']:
mlp = MLP(mlp_sizes, activation=use_activation).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
for l in range(mlp.num_layers):
np.testing.assert_allclose(
mlp.weights[l].grad.detach().cpu().numpy(),
ref_mlp.weights[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[l].grad.detach().cpu().numpy(),
ref_mlp.biases[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_no_grad(self):
mlp = MLP(mlp_sizes).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
ref_input = test_input.clone().detach()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_performance_half(self):
mlp = MLP(mlp_sizes).cuda().half()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
if i < mlp.num_layers - 1:
# mlp_layers.append(nn.ReLU(inplace=True))
mlp_layers.append(torch.nn.GELU())
mlp_layers.append(nn.Dropout(0.25))
ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
test_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(100):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
|
[
"quanpn90@gmail.com"
] |
quanpn90@gmail.com
|
044bc7efb85e34003ae56c0d48f464ec535fc949
|
7700fd9502e46b9c742093ac8748c43919a84091
|
/rbac_blog/blog/forms.py
|
aebe7f6c213ef3608de4b1fb267456437a4e718d
|
[] |
no_license
|
Guo-kai-feng/python_project
|
e825bf2e20d79bde3facf6e08a4482a025cfe2d4
|
5a24734ad5d4aa82ac47c5d912d1dc48c32c8f24
|
refs/heads/master
| 2020-07-13T22:00:37.493273
| 2019-08-29T13:07:46
| 2019-08-29T13:07:46
| 205,163,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from blog import models
from django import forms
class AlterBlogForm(forms.ModelForm):
class Meta:
model = models.Blog
fields = '__all__'
exclude = []
error_messages = {
'name': {'required': '不能为空'},
'user': {'required': '不能为空'},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from multiselectfield.forms.fields import MultiSelectFormField
for field_name, field in self.fields.items():
if not isinstance(field, MultiSelectFormField):
field.widget.attrs.update({'class': 'form-control'})
class AlterArticleForm(forms.ModelForm):
class Meta:
model = models.Article
fields = '__all__'
exclude = ['create_at', ]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from multiselectfield.forms.fields import MultiSelectFormField
for field_name, field in self.fields.items():
if not isinstance(field, MultiSelectFormField):
field.widget.attrs.update({'class': 'form-control'})
|
[
"798136317@qq.com"
] |
798136317@qq.com
|
0886616bd81e0a2e31e16fed2ae9620947223dac
|
ae326c4e6a2b2d5b67fa8d175249ef90f6a3021a
|
/leo/external/rope/ropetest/refactor/extracttest.py
|
167f7984d254f4be25e2554d9f39807e0827d542
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
frakel/leo-editor
|
f95e6c77d60485d80fddfbeaf35db961cf691177
|
b574118ee3b7ffe8344fa0d00dac603096117ac7
|
refs/heads/master
| 2020-03-28T10:40:24.621077
| 2018-10-23T14:39:31
| 2018-10-23T14:39:31
| 148,132,817
| 0
| 0
|
MIT
| 2018-09-10T09:40:18
| 2018-09-10T09:40:18
| null |
UTF-8
|
Python
| false
| false
| 42,781
|
py
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import rope.base.codeanalyze
import rope.base.exceptions
from rope.refactor import extract
from ropetest import testutils
class ExtractMethodTest(unittest.TestCase):
def setUp(self):
super(ExtractMethodTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
def tearDown(self):
testutils.remove_project(self.project)
super(ExtractMethodTest, self).tearDown()
def do_extract_method(self, source_code, start, end, extracted, **kwds):
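        # writes source_code into a scratch module, extracts the [start, end) region
        # into a new function/method and returns the refactored source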
testmod = testutils.create_module(self.project, 'testmod')
testmod.write(source_code)
extractor = extract.ExtractMethod(
self.project, testmod, start, end)
self.project.do(extractor.get_changes(extracted, **kwds))
return testmod.read()
def do_extract_variable(self, source_code, start, end, extracted, **kwds):
testmod = testutils.create_module(self.project, 'testmod')
testmod.write(source_code)
extractor = extract.ExtractVariable(self.project, testmod, start, end)
self.project.do(extractor.get_changes(extracted, **kwds))
return testmod.read()
def _convert_line_range_to_offset(self, code, start, end):
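        # converts a 1-based, inclusive line range into (start_offset, end_offset) character offsets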
lines = rope.base.codeanalyze.SourceLinesAdapter(code)
return lines.get_line_start(start), lines.get_line_end(end)
def test_simple_extract_function(self):
code = "def a_func():\n print('one')\n print('two')\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func():\n extracted()\n print('two')\n\n" \
"def extracted():\n print('one')\n"
self.assertEquals(expected, refactored)
def test_extract_function_at_the_end_of_file(self):
code = "def a_func():\n print('one')"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func():\n extracted()\n" \
"def extracted():\n print('one')\n"
self.assertEquals(expected, refactored)
def test_extract_function_after_scope(self):
code = "def a_func():\n print('one')\n print('two')" \
"\n\nprint('hey')\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func():\n extracted()\n print('two')\n\n" \
"def extracted():\n print('one')\n\nprint('hey')\n"
self.assertEquals(expected, refactored)
def test_simple_extract_function_with_parameter(self):
code = "def a_func():\n a_var = 10\n print(a_var)\n"
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "def a_func():\n a_var = 10\n new_func(a_var)\n\n" \
"def new_func(a_var):\n print(a_var)\n"
self.assertEquals(expected, refactored)
def test_not_unread_variables_as_parameter(self):
code = "def a_func():\n a_var = 10\n print('hey')\n"
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "def a_func():\n a_var = 10\n new_func()\n\n" \
"def new_func():\n print('hey')\n"
self.assertEquals(expected, refactored)
def test_simple_extract_function_with_two_parameter(self):
code = 'def a_func():\n a_var = 10\n another_var = 20\n' \
' third_var = a_var + another_var\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var = 10\n another_var = 20\n' \
' new_func(a_var, another_var)\n\n' \
'def new_func(a_var, another_var):\n' \
' third_var = a_var + another_var\n'
self.assertEquals(expected, refactored)
def test_simple_extract_function_with_return_value(self):
code = 'def a_func():\n a_var = 10\n print(a_var)\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var = new_func()' \
'\n print(a_var)\n\n' \
'def new_func():\n a_var = 10\n return a_var\n'
self.assertEquals(expected, refactored)
def test_extract_function_with_multiple_return_values(self):
code = 'def a_func():\n a_var = 10\n another_var = 20\n' \
' third_var = a_var + another_var\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var, another_var = new_func()\n' \
' third_var = a_var + another_var\n\n' \
'def new_func():\n a_var = 10\n another_var = 20\n' \
' return a_var, another_var\n'
self.assertEquals(expected, refactored)
def test_simple_extract_method(self):
code = 'class AClass(object):\n\n' \
' def a_func(self):\n print(1)\n print(2)\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n\n' \
' def a_func(self):\n' \
' self.new_func()\n' \
' print(2)\n\n' \
' def new_func(self):\n print(1)\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_args_and_returns(self):
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' a_var = 10\n' \
' another_var = a_var * 3\n' \
' third_var = a_var + another_var\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' a_var = 10\n' \
' another_var = self.new_func(a_var)\n' \
' third_var = a_var + another_var\n\n' \
' def new_func(self, a_var):\n' \
' another_var = a_var * 3\n' \
' return another_var\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_self_as_argument(self):
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' print(self)\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' self.new_func()\n\n' \
' def new_func(self):\n' \
' print(self)\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_no_self_as_argument(self):
code = 'class AClass(object):\n' \
' def a_func():\n' \
' print(1)\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_with_multiple_methods(self):
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' print(self)\n\n' \
' def another_func(self):\n' \
' pass\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' self.new_func()\n\n' \
' def new_func(self):\n' \
' print(self)\n\n' \
' def another_func(self):\n' \
' pass\n'
self.assertEquals(expected, refactored)
def test_extract_function_with_function_returns(self):
code = 'def a_func():\n def inner_func():\n pass\n' \
' inner_func()\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n' \
' inner_func = new_func()\n inner_func()\n\n' \
'def new_func():\n' \
' def inner_func():\n pass\n' \
' return inner_func\n'
self.assertEquals(expected, refactored)
def test_simple_extract_global_function(self):
code = "print('one')\nprint('two')\nprint('three')\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "print('one')\n\ndef new_func():\n print('two')\n" \
"\nnew_func()\nprint('three')\n"
self.assertEquals(expected, refactored)
def test_extract_global_function_inside_ifs(self):
code = 'if True:\n a = 10\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n a = 10\n\nif True:\n' \
' new_func()\n'
self.assertEquals(expected, refactored)
def test_extract_function_while_inner_function_reads(self):
code = 'def a_func():\n a_var = 10\n' \
' def inner_func():\n print(a_var)\n' \
' return inner_func\n'
start, end = self._convert_line_range_to_offset(code, 3, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a_var = 10\n' \
' inner_func = new_func(a_var)' \
'\n return inner_func\n\n' \
'def new_func(a_var):\n' \
' def inner_func():\n print(a_var)\n' \
' return inner_func\n'
self.assertEquals(expected, refactored)
def test_extract_method_bad_range(self):
code = "def a_func():\n pass\na_var = 10\n"
start, end = self._convert_line_range_to_offset(code, 2, 3)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_bad_range2(self):
code = "class AClass(object):\n pass\n"
start, end = self._convert_line_range_to_offset(code, 1, 1)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_containing_return(self):
code = 'def a_func(arg):\n if arg:\n return arg * 2' \
'\n return 1'
start, end = self._convert_line_range_to_offset(code, 2, 4)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_containing_yield(self):
code = "def a_func(arg):\n yield arg * 2\n"
start, end = self._convert_line_range_to_offset(code, 2, 2)
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
    def test_extract_method_containing_incomplete_lines(self):
code = 'a_var = 20\nanother_var = 30\n'
start = code.index('20')
end = code.index('30') + 2
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
    def test_extract_method_containing_incomplete_lines2(self):
code = 'a_var = 20\nanother_var = 30\n'
start = code.index('20')
end = code.index('another') + 5
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
    def test_extract_function_and_argument_as_parameter(self):
code = 'def a_func(arg):\n print(arg)\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func(arg):\n new_func(arg)\n\n' \
'def new_func(arg):\n print(arg)\n'
self.assertEquals(expected, refactored)
def test_extract_function_and_end_as_the_start_of_a_line(self):
code = 'print("hey")\nif True:\n pass\n'
start = 0
end = code.index('\n') + 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n print("hey")\n\n' \
'new_func()\nif True:\n pass\n'
self.assertEquals(expected, refactored)
def test_extract_function_and_indented_blocks(self):
code = 'def a_func(arg):\n if True:\n' \
' if True:\n print(arg)\n'
start, end = self._convert_line_range_to_offset(code, 3, 4)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func(arg):\n ' \
'if True:\n new_func(arg)\n\n' \
'def new_func(arg):\n if True:\n print(arg)\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_multi_line_headers(self):
code = 'def a_func(\n arg):\n print(arg)\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func(\n arg):\n new_func(arg)\n\n' \
'def new_func(arg):\n print(arg)\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_function(self):
code = 'a_var = 10 + 20\n'
start = code.index('10')
end = code.index('20') + 2
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "\ndef new_func():\n " \
"return 10 + 20\n\na_var = new_func()\n"
self.assertEquals(expected, refactored)
def test_single_line_extract_function2(self):
code = 'def a_func():\n a = 10\n b = a * 20\n'
start = code.rindex('a')
end = code.index('20') + 2
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a = 10\n b = new_func(a)\n' \
'\ndef new_func(a):\n return a * 20\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_method_and_logical_lines(self):
code = 'a_var = 10 +\\\n 20\n'
start = code.index('10')
end = code.index('20') + 2
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n ' \
'return 10 + 20\n\na_var = new_func()\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_method_and_logical_lines2(self):
code = 'a_var = (10,\\\n 20)\n'
start = code.index('10') - 1
end = code.index('20') + 3
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = '\ndef new_func():\n' \
' return (10, 20)\n\na_var = new_func()\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_method(self):
code = "class AClass(object):\n\n" \
" def a_func(self):\n a = 10\n b = a * a\n"
start = code.rindex('=') + 2
end = code.rindex('a') + 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class AClass(object):\n\n' \
' def a_func(self):\n' \
' a = 10\n b = self.new_func(a)\n\n' \
' def new_func(self, a):\n return a * a\n'
self.assertEquals(expected, refactored)
def test_single_line_extract_function_if_condition(self):
code = 'if True:\n pass\n'
start = code.index('True')
end = code.index('True') + 4
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = "\ndef new_func():\n return True\n\nif new_func():" \
"\n pass\n"
self.assertEquals(expected, refactored)
def test_unneeded_params(self):
code = 'class A(object):\n ' \
'def a_func(self):\n a_var = 10\n a_var += 2\n'
start = code.rindex('2')
end = code.rindex('2') + 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'class A(object):\n' \
' def a_func(self):\n a_var = 10\n' \
' a_var += self.new_func()\n\n' \
' def new_func(self):\n return 2\n'
self.assertEquals(expected, refactored)
def test_breaks_and_continues_inside_loops(self):
code = 'def a_func():\n for i in range(10):\n continue\n'
start = code.index('for')
end = len(code) - 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n new_func()\n\n' \
'def new_func():\n' \
' for i in range(10):\n continue\n'
self.assertEquals(expected, refactored)
def test_breaks_and_continues_outside_loops(self):
code = 'def a_func():\n' \
' for i in range(10):\n a = i\n continue\n'
start = code.index('a = i')
end = len(code) - 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_variable_writes_followed_by_variable_reads_after_extraction(self):
code = 'def a_func():\n a = 1\n a = 2\n b = a\n'
start = code.index('a = 1')
end = code.index('a = 2') - 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n new_func()\n a = 2\n b = a\n\n' \
'def new_func():\n a = 1\n'
self.assertEquals(expected, refactored)
def test_var_writes_followed_by_var_reads_inside_extraction(self):
code = 'def a_func():\n a = 1\n a = 2\n b = a\n'
start = code.index('a = 2')
end = len(code) - 1
refactored = self.do_extract_method(code, start, end, 'new_func')
expected = 'def a_func():\n a = 1\n new_func()\n\n' \
'def new_func():\n a = 2\n b = a\n'
self.assertEquals(expected, refactored)
def test_extract_variable(self):
code = 'a_var = 10 + 20\n'
start = code.index('10')
end = code.index('20') + 2
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'new_var = 10 + 20\na_var = new_var\n'
self.assertEquals(expected, refactored)
def test_extract_variable_multiple_lines(self):
code = 'a = 1\nb = 2\n'
start = code.index('1')
end = code.index('1') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'c = 1\na = c\nb = 2\n'
self.assertEquals(expected, refactored)
def test_extract_variable_in_the_middle_of_statements(self):
code = 'a = 1 + 2\n'
start = code.index('1')
end = code.index('1') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'c = 1\na = c + 2\n'
self.assertEquals(expected, refactored)
def test_extract_variable_for_a_tuple(self):
code = 'a = 1, 2\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'c = 1, 2\na = c\n'
self.assertEquals(expected, refactored)
def test_extract_variable_for_a_string(self):
code = 'def a_func():\n a = "hey!"\n'
start = code.index('"')
end = code.rindex('"') + 1
refactored = self.do_extract_variable(code, start, end, 'c')
expected = 'def a_func():\n c = "hey!"\n a = c\n'
self.assertEquals(expected, refactored)
def test_extract_variable_inside_ifs(self):
code = 'if True:\n a = 1 + 2\n'
start = code.index('1')
end = code.rindex('2') + 1
refactored = self.do_extract_variable(code, start, end, 'b')
expected = 'if True:\n b = 1 + 2\n a = b\n'
self.assertEquals(expected, refactored)
def test_extract_variable_inside_ifs_and_logical_lines(self):
code = 'if True:\n a = (3 + \n(1 + 2))\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'b')
expected = 'if True:\n b = 1 + 2\n a = (3 + \n(b))\n'
self.assertEquals(expected, refactored)
# TODO: Handle when extracting a subexpression
def xxx_test_extract_variable_for_a_subexpression(self):
code = 'a = 3 + 1 + 2\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'b')
expected = 'b = 1 + 2\na = 3 + b\n'
self.assertEquals(expected, refactored)
def test_extract_variable_starting_from_the_start_of_the_line(self):
code = 'a_dict = {1: 1}\na_dict.values().count(1)\n'
start = code.rindex('a_dict')
end = code.index('count') - 1
refactored = self.do_extract_variable(code, start, end, 'values')
expected = 'a_dict = {1: 1}\n' \
'values = a_dict.values()\nvalues.count(1)\n'
self.assertEquals(expected, refactored)
def test_extract_variable_on_the_last_line_of_a_function(self):
code = 'def f():\n a_var = {}\n a_var.keys()\n'
start = code.rindex('a_var')
end = code.index('.keys')
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'def f():\n a_var = {}\n ' \
'new_var = a_var\n new_var.keys()\n'
self.assertEquals(expected, refactored)
def test_extract_variable_on_the_indented_function_statement(self):
code = 'def f():\n if True:\n a_var = 1 + 2\n'
start = code.index('1')
end = code.index('2') + 1
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'def f():\n if True:\n' \
' new_var = 1 + 2\n a_var = new_var\n'
self.assertEquals(expected, refactored)
def test_extract_method_on_the_last_line_of_a_function(self):
code = 'def f():\n a_var = {}\n a_var.keys()\n'
start = code.rindex('a_var')
end = code.index('.keys')
refactored = self.do_extract_method(code, start, end, 'new_f')
expected = 'def f():\n a_var = {}\n new_f(a_var).keys()\n\n' \
'def new_f(a_var):\n return a_var\n'
self.assertEquals(expected, refactored)
def test_raising_exception_when_on_incomplete_variables(self):
code = 'a_var = 10 + 20\n'
start = code.index('10') + 1
end = code.index('20') + 2
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_raising_exception_when_on_incomplete_variables_on_end(self):
code = 'a_var = 10 + 20\n'
start = code.index('10')
end = code.index('20') + 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_raising_exception_on_bad_parens(self):
code = 'a_var = (10 + 20) + 30\n'
start = code.index('20')
end = code.index('30') + 2
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_raising_exception_on_bad_operators(self):
code = 'a_var = 10 + 20 + 30\n'
start = code.index('10')
end = code.rindex('+') + 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
# FIXME: Extract method should be more intelligent about bad ranges
def xxx_test_raising_exception_on_function_parens(self):
code = 'a = range(10)'
start = code.index('(')
end = code.rindex(')') + 1
with self.assertRaises(rope.base.exceptions.RefactoringError):
self.do_extract_method(code, start, end, 'new_func')
def test_extract_method_and_extra_blank_lines(self):
code = '\nprint(1)\n'
refactored = self.do_extract_method(code, 0, len(code), 'new_f')
expected = '\n\ndef new_f():\n print(1)\n\nnew_f()\n'
self.assertEquals(expected, refactored)
def test_variable_writes_in_the_same_line_as_variable_read(self):
code = 'a = 1\na = 1 + a\n'
start = code.index('\n') + 1
end = len(code)
refactored = self.do_extract_method(code, start, end, 'new_f',
global_=True)
expected = 'a = 1\n\ndef new_f(a):\n a = 1 + a\n\nnew_f(a)\n'
self.assertEquals(expected, refactored)
def test_variable_writes_in_the_same_line_as_variable_read2(self):
code = 'a = 1\na += 1\n'
start = code.index('\n') + 1
end = len(code)
refactored = self.do_extract_method(code, start, end, 'new_f',
global_=True)
expected = 'a = 1\n\ndef new_f():\n a += 1\n\nnew_f()\n'
self.assertEquals(expected, refactored)
def test_variable_and_similar_expressions(self):
code = 'a = 1\nb = 1\n'
start = code.index('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end,
'one', similar=True)
expected = 'one = 1\na = one\nb = one\n'
self.assertEquals(expected, refactored)
def test_definition_should_appear_before_the_first_use(self):
code = 'a = 1\nb = 1\n'
start = code.rindex('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end,
'one', similar=True)
expected = 'one = 1\na = one\nb = one\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_similar_expressions(self):
code = 'a = 1\nb = 1\n'
start = code.index('1')
end = start + 1
refactored = self.do_extract_method(code, start, end,
'one', similar=True)
expected = '\ndef one():\n return 1\n\na = one()\nb = one()\n'
self.assertEquals(expected, refactored)
def test_simple_extract_method_and_similar_statements(self):
code = 'class AClass(object):\n\n' \
' def func1(self):\n a = 1 + 2\n b = a\n' \
' def func2(self):\n a = 1 + 2\n b = a\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end,
'new_func', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self):\n' \
' a = self.new_func()\n b = a\n\n' \
' def new_func(self):\n' \
' a = 1 + 2\n return a\n' \
' def func2(self):\n' \
' a = self.new_func()\n b = a\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_similar_statements2(self):
code = 'class AClass(object):\n\n' \
' def func1(self, p1):\n a = p1 + 2\n' \
' def func2(self, p2):\n a = p2 + 2\n'
start = code.rindex('p1')
end = code.index('2\n') + 1
refactored = self.do_extract_method(code, start, end,
'new_func', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self, p1):\n ' \
'a = self.new_func(p1)\n\n' \
' def new_func(self, p1):\n return p1 + 2\n' \
' def func2(self, p2):\n a = self.new_func(p2)\n'
self.assertEquals(expected, refactored)
    def test_extract_method_and_similar_statements_return_is_different(self):
code = 'class AClass(object):\n\n' \
' def func1(self, p1):\n a = p1 + 2\n' \
' def func2(self, p2):\n self.attr = p2 + 2\n'
start = code.rindex('p1')
end = code.index('2\n') + 1
refactored = self.do_extract_method(code, start, end,
'new_func', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self, p1):' \
'\n a = self.new_func(p1)\n\n' \
' def new_func(self, p1):\n return p1 + 2\n' \
' def func2(self, p2):\n' \
' self.attr = self.new_func(p2)\n'
self.assertEquals(expected, refactored)
def test_definition_should_appear_where_it_is_visible(self):
code = 'if True:\n a = 1\nelse:\n b = 1\n'
start = code.rindex('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end,
'one', similar=True)
expected = 'one = 1\nif True:\n a = one\nelse:\n b = one\n'
self.assertEquals(expected, refactored)
def test_extract_variable_and_similar_statements_in_classes(self):
code = 'class AClass(object):\n\n' \
' def func1(self):\n a = 1\n' \
' def func2(self):\n b = 1\n'
start = code.index(' 1') + 1
refactored = self.do_extract_variable(code, start, start + 1,
'one', similar=True)
expected = 'class AClass(object):\n\n' \
' def func1(self):\n one = 1\n a = one\n' \
' def func2(self):\n b = 1\n'
self.assertEquals(expected, refactored)
def test_extract_method_in_staticmethods(self):
code = 'class AClass(object):\n\n' \
' @staticmethod\n def func2():\n b = 1\n'
start = code.index(' 1') + 1
refactored = self.do_extract_method(code, start, start + 1,
'one', similar=True)
expected = 'class AClass(object):\n\n' \
' @staticmethod\n def func2():\n' \
' b = AClass.one()\n\n' \
' @staticmethod\n def one():\n' \
' return 1\n'
self.assertEquals(expected, refactored)
def test_extract_normal_method_with_staticmethods(self):
code = 'class AClass(object):\n\n' \
' @staticmethod\n def func1():\n b = 1\n' \
' def func2(self):\n b = 1\n'
start = code.rindex(' 1') + 1
refactored = self.do_extract_method(code, start, start + 1,
'one', similar=True)
expected = 'class AClass(object):\n\n' \
' @staticmethod\n def func1():\n b = 1\n' \
' def func2(self):\n b = self.one()\n\n' \
' def one(self):\n return 1\n'
self.assertEquals(expected, refactored)
def test_extract_variable_with_no_new_lines_at_the_end(self):
code = 'a_var = 10'
start = code.index('10')
end = start + 2
refactored = self.do_extract_variable(code, start, end, 'new_var')
expected = 'new_var = 10\na_var = new_var'
self.assertEquals(expected, refactored)
def test_extract_method_containing_return_in_functions(self):
code = 'def f(arg):\n return arg\nprint(f(1))\n'
start, end = self._convert_line_range_to_offset(code, 1, 3)
refactored = self.do_extract_method(code, start, end, 'a_func')
expected = '\ndef a_func():\n def f(arg):\n return arg\n' \
' print(f(1))\n\na_func()\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_varying_first_parameter(self):
code = 'class C(object):\n' \
' def f1(self):\n print(str(self))\n' \
' def f2(self):\n print(str(1))\n'
start = code.index('print(') + 6
end = code.index('))\n') + 1
refactored = self.do_extract_method(code, start, end,
'to_str', similar=True)
expected = 'class C(object):\n' \
' def f1(self):\n print(self.to_str())\n\n' \
' def to_str(self):\n return str(self)\n' \
' def f2(self):\n print(str(1))\n'
self.assertEquals(expected, refactored)
def test_extract_method_when_an_attribute_exists_in_function_scope(self):
code = 'class A(object):\n def func(self):\n pass\n' \
'a = A()\n' \
'def f():\n' \
' func = a.func()\n' \
' print func\n'
start, end = self._convert_line_range_to_offset(code, 6, 6)
refactored = self.do_extract_method(code, start, end, 'g')
refactored = refactored[refactored.index('A()') + 4:]
expected = 'def f():\n func = g()\n print func\n\n' \
'def g():\n func = a.func()\n return func\n'
self.assertEquals(expected, refactored)
def test_global_option_for_extract_method(self):
code = 'def a_func():\n print(1)\n'
start, end = self._convert_line_range_to_offset(code, 2, 2)
refactored = self.do_extract_method(code, start, end,
'extracted', global_=True)
expected = 'def a_func():\n extracted()\n\n' \
'def extracted():\n print(1)\n'
self.assertEquals(expected, refactored)
def test_global_extract_method(self):
code = 'class AClass(object):\n\n' \
' def a_func(self):\n print(1)\n'
start, end = self._convert_line_range_to_offset(code, 4, 4)
refactored = self.do_extract_method(code, start, end,
'new_func', global_=True)
expected = 'class AClass(object):\n\n' \
' def a_func(self):\n new_func()\n\n' \
'def new_func():\n print(1)\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_multiple_methods(self): # noqa
code = 'class AClass(object):\n' \
' def a_func(self):\n' \
' print(1)\n\n' \
' def another_func(self):\n' \
' pass\n'
start, end = self._convert_line_range_to_offset(code, 3, 3)
refactored = self.do_extract_method(code, start, end,
'new_func', global_=True)
expected = 'class AClass(object):\n' \
' def a_func(self):\n' \
' new_func()\n\n' \
' def another_func(self):\n' \
' pass\n\n' \
'def new_func():\n' \
' print(1)\n'
self.assertEquals(expected, refactored)
    def test_where_to_search_when_extracting_global_names(self):
code = 'def a():\n return 1\ndef b():\n return 1\nb = 1\n'
start = code.index('1')
end = start + 1
refactored = self.do_extract_variable(code, start, end, 'one',
similar=True, global_=True)
expected = 'def a():\n return one\none = 1\n' \
'def b():\n return one\nb = one\n'
self.assertEquals(expected, refactored)
def test_extracting_pieces_with_distinct_temp_names(self):
code = 'a = 1\nprint a\nb = 1\nprint b\n'
start = code.index('a')
end = code.index('\nb')
refactored = self.do_extract_method(code, start, end, 'f',
similar=True, global_=True)
expected = '\ndef f():\n a = 1\n print a\n\nf()\nf()\n'
self.assertEquals(expected, refactored)
def test_extract_methods_in_glob_funcs_should_be_glob(self):
code = 'def f():\n a = 1\ndef g():\n b = 1\n'
start = code.rindex('1')
refactored = self.do_extract_method(code, start, start + 1, 'one',
similar=True, global_=False)
expected = 'def f():\n a = one()\ndef g():\n b = one()\n\n' \
'def one():\n return 1\n'
self.assertEquals(expected, refactored)
def test_extract_methods_in_glob_funcs_should_be_glob_2(self):
code = 'if 1:\n var = 2\n'
start = code.rindex('2')
refactored = self.do_extract_method(code, start, start + 1, 'two',
similar=True, global_=False)
expected = '\ndef two():\n return 2\n\nif 1:\n var = two()\n'
self.assertEquals(expected, refactored)
def test_extract_method_and_try_blocks(self):
code = 'def f():\n try:\n pass\n' \
' except Exception:\n pass\n'
start, end = self._convert_line_range_to_offset(code, 2, 5)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f():\n g()\n\ndef g():\n try:\n pass\n' \
' except Exception:\n pass\n'
self.assertEquals(expected, refactored)
def test_extract_and_not_passing_global_functions(self):
code = 'def next(p):\n return p + 1\nvar = next(1)\n'
start = code.rindex('next')
refactored = self.do_extract_method(code, start, len(code) - 1, 'two')
expected = 'def next(p):\n return p + 1\n' \
'\ndef two():\n return next(1)\n\nvar = two()\n'
self.assertEquals(expected, refactored)
def test_extracting_with_only_one_return(self):
code = 'def f():\n var = 1\n return var\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f():\n return g()\n\n' \
'def g():\n var = 1\n return var\n'
self.assertEquals(expected, refactored)
def test_extracting_variable_and_implicit_continuations(self):
code = 's = ("1"\n "2")\n'
start = code.index('"')
end = code.rindex('"') + 1
refactored = self.do_extract_variable(code, start, end, 's2')
expected = 's2 = "1" "2"\ns = (s2)\n'
self.assertEquals(expected, refactored)
def test_extracting_method_and_implicit_continuations(self):
code = 's = ("1"\n "2")\n'
start = code.index('"')
end = code.rindex('"') + 1
refactored = self.do_extract_method(code, start, end, 'f')
expected = '\ndef f():\n return "1" "2"\n\ns = (f())\n'
self.assertEquals(expected, refactored)
def test_passing_conditional_updated_vars_in_extracted(self):
code = 'def f(a):\n' \
' if 0:\n' \
' a = 1\n' \
' print(a)\n'
start, end = self._convert_line_range_to_offset(code, 2, 4)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f(a):\n' \
' g(a)\n\n' \
'def g(a):\n' \
' if 0:\n' \
' a = 1\n' \
' print(a)\n'
self.assertEquals(expected, refactored)
def test_returning_conditional_updated_vars_in_extracted(self):
code = 'def f(a):\n' \
' if 0:\n' \
' a = 1\n' \
' print(a)\n'
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'g')
expected = 'def f(a):\n' \
' a = g(a)\n' \
' print(a)\n\n' \
'def g(a):\n' \
' if 0:\n' \
' a = 1\n' \
' return a\n'
self.assertEquals(expected, refactored)
def test_extract_method_with_variables_possibly_written_to(self):
code = "def a_func(b):\n" \
" if b > 0:\n" \
" a = 2\n" \
" print a\n"
start, end = self._convert_line_range_to_offset(code, 2, 3)
refactored = self.do_extract_method(code, start, end, 'extracted')
expected = "def a_func(b):\n" \
" a = extracted(b)\n" \
" print a\n\n" \
"def extracted(b):\n" \
" if b > 0:\n" \
" a = 2\n" \
" return a\n"
self.assertEquals(expected, refactored)
if __name__ == '__main__':
unittest.main()
|
[
"edreamleo@gmail.com"
] |
edreamleo@gmail.com
|
fba3989988948bd693ff0403cfac404b8e39195b
|
fbf05e2d3eef31367f26af979150f1d8325f6b2f
|
/flask_dmango/query/exceptions.py
|
12742acd9013a856f68e40b5a47283c5465a20f1
|
[] |
no_license
|
jungkoo/flask-dmango
|
0b653dac9b72935315244b9ff74b5fc89c5bb542
|
abecafa611fce10cd34c9b8401df179b163a9424
|
refs/heads/master
| 2021-01-10T09:29:01.651686
| 2016-03-27T08:27:57
| 2016-03-27T08:27:57
| 54,324,383
| 1
| 1
| null | 2016-03-22T04:18:53
| 2016-03-20T15:14:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
#-*- coding: utf-8 -*-
class DmangoException(Exception):
def __init__(self, message):
self._message = message
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
class NotSupportMethod(DmangoException):
def __init__(self, method):
self.method = method
def __str__(self):
return '"%s" is not a valid method name' % (str(self.method), )
class DmangoValueException(DmangoException):
"""
value 를 관련 문제를 의미한다.
예를 들어 'a' 를 int 로 변경하려고 했을경우라거나... 값이 허용되는게 아니라거나.
"""
def __init__(self, value_name):
self._value_name = value_name
def __str__(self):
return '[DMANGO-100002] value Error. (value name="%s")' % (self._value_name,)
class DmangoParseException(DmangoException):
def __init__(self, errmsg='parse error'):
self.errmsg = errmsg
def __str__(self):
return "[DMANGO-100003] " + self.errmsg
|
[
"deajang@gmail.com"
] |
deajang@gmail.com
|
a1206366cfe0cff96c0e2306766ca9fd485e3b71
|
ec61946a176935044d08cf1244d2185f2460df32
|
/pyleecan/Methods/Machine/Lamination/comp_surface_axial_vent.py
|
67a12115e86fe145e22c9c128345b44e666dd252
|
[
"Apache-2.0"
] |
permissive
|
Lunreth/pyleecan
|
d3974a144cb8a6c332339ab0426f1630b7516fc9
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
refs/heads/master
| 2023-06-07T01:46:32.453763
| 2021-07-01T21:29:51
| 2021-07-01T21:29:51
| 383,880,732
| 1
| 0
|
Apache-2.0
| 2021-07-07T17:47:01
| 2021-07-07T17:47:01
| null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# -*- coding: utf-8 -*-
def comp_surface_axial_vent(self):
"""Compute the Lamination axial vent
Parameters
----------
self : Lamination
A Lamination object
Returns
-------
Svent: float
Surface of the Lamination's axial ventilation [m**2]
"""
if len(self.axial_vent) > 0:
return sum([vent.comp_surface() for vent in self.axial_vent])
else:
return 0
|
[
"pierre.bonneel@eomys.com"
] |
pierre.bonneel@eomys.com
|
04777f286a93e171d45397ce0a3643795c4b76af
|
9d3cab321f1b940bc7ef0ffbd1c7779d58008e35
|
/hooks.py
|
b2c04ec8ba85f827c9081d4e87b7c411a59aea3d
|
[] |
no_license
|
celsoflores/orbui
|
ae37d2497c1eebc3132097d98a1847950cfd6eed
|
6b8ac794832e4baa6cf5ef8e0d7ba5ed8eda12e6
|
refs/heads/master
| 2022-11-13T03:20:40.098283
| 2016-05-16T22:16:41
| 2016-05-16T22:16:41
| 276,276,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,127
|
py
|
# -*- coding: utf-8 -*-
# Validation hook for relationship
# Author: Softtek - MLCS
# Date: Jun 2014
# Project: Platypus
from cubicweb import ValidationError
from cubicweb.server.hook import Hook, match_rtype
from cubes.orbui.views.autocomplete_edition_view import AutoCompleteEntityRetriever
class Validate_Autocomplete_RulesHook(Hook):
"""
Validate the correct application of the autocomplete rules
"""
__regid__ = 'validateAutocompleteRules'
__select__ = Hook.__select__ & ~match_rtype('created_by', 'owned_by')
events = ('before_add_relation',)
def __call__(self):
#print 'eidfrom: %s, eidto: %s, rtype: %s' % (self.eidfrom, self.eidto, self.rtype)
        #When the relation already exists, the special conditions are not evaluated
srql = 'Any X, Y WHERE X %s Y, X eid %s, Y eid %s' % (self.rtype, self.eidfrom, self.eidto)
if self._cw.execute(srql).rowcount > 0:
return
eidfrom = self._cw.entity_from_eid(self.eidfrom)
eidto = self._cw.entity_from_eid(self.eidto)
#Evaluate the direct relation
target = ''
specialsearch = AutoCompleteEntityRetriever().getSpecialSearch(self._cw, eidfrom, self.rtype, type(eidto).__name__, 'subject')
if specialsearch != ' ':
unrelated = eidfrom.cw_unrelated_rql(self.rtype, type(eidto).__name__, 'subject')
srql = ((unrelated[0] % unrelated[1]) + specialsearch + ', O eid ' + str(self.eidto))
if self._cw.execute(srql).rowcount < 1:
target = ('%(entity)s|%(relation)s%(role)s|%(etype_search)s'
% {'entity': type(eidfrom).__name__,
'relation': self.rtype, 'role': '',
'etype_search': type(eidto).__name__})
helpmsg = self._cw._('Validation error, relation not valid')
if target in AutoCompleteEntityRetriever().HELP_MESSAGES:
helpmsg = self._cw._(AutoCompleteEntityRetriever().HELP_MESSAGES[target])
raise ValidationError(self.eidfrom, {self.rtype: helpmsg})
#Evaluate the reverse relation
target = ''
specialsearch = AutoCompleteEntityRetriever().getSpecialSearch(self._cw, eidto, self.rtype, type(eidfrom).__name__, 'object')
if specialsearch != ' ':
unrelated = eidto.cw_unrelated_rql(self.rtype, type(eidfrom).__name__, 'object')
srql = ((unrelated[0] % unrelated[1]) + specialsearch + ', S eid ' + str(self.eidfrom))
if self._cw.execute(srql).rowcount < 1:
target = ('%(entity)s|%(relation)s%(role)s|%(etype_search)s'
% {'entity': type(eidto).__name__,
'relation': self.rtype, 'role': '_object',
'etype_search': type(eidfrom).__name__})
helpmsg = self._cw._('Validation error, relation not valid')
if target in AutoCompleteEntityRetriever().HELP_MESSAGES:
helpmsg = self._cw._(AutoCompleteEntityRetriever().HELP_MESSAGES[target])
raise ValidationError(self.eidto, {self.rtype: helpmsg})
|
[
"walter.arriaga@softtek.com"
] |
walter.arriaga@softtek.com
|
a7b5924d50b1d26afe48b186debeead1b5c2ba60
|
5e3768b724a93e834eca6c92f54f45bd45b05106
|
/automate_models.py
|
1da485abbe89f744c99b449feb929897d2d07ede
|
[
"MIT"
] |
permissive
|
BorisBorshevsky/ML-Elections
|
e10bd578e2923ef15112165702280ceca8f0f285
|
26a0b7b184deceb7a1c2727ba8e458d565b19512
|
refs/heads/master
| 2021-05-06T19:08:15.226036
| 2018-01-15T00:57:55
| 2018-01-15T00:57:55
| 112,019,076
| 0
| 1
|
MIT
| 2017-12-04T00:48:08
| 2017-11-25T16:43:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,584
|
py
|
import pandas as pd
import numpy as np
from IPython import embed
from sklearn import clone
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.multiclass import OneVsOneClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import Perceptron
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
models = {
"SVC Linear kernel": SVC(kernel='linear'),
"linearSVC OVR": LinearSVC(multi_class='ovr'),
"linearSVC crammer_singer": LinearSVC(multi_class='crammer_singer'),
"One Vs One": OneVsOneClassifier(LinearSVC()),
"Naive Bayes": GaussianNB(),
"Perceptron": Perceptron(max_iter=300),
"LinearDiscriminantAnalysis": LinearDiscriminantAnalysis()
}
def add_parametrized_models():
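    # Sweep small hyper-parameter grids: min_samples_split for the tree/forest
    # models and n_neighbors for kNN, registering one model per setting.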
for splitter in range(2, 20):
models["DecisionTreeClassifier with splitter %d" % splitter] = DecisionTreeClassifier(min_samples_split=splitter,
random_state=0)
for splitter in range(2, 20):
models["RandomForestClassifier with splitter %d" % splitter] = RandomForestClassifier(min_samples_split=splitter,
random_state=0)
for n in range(2, 20):
models["KNeighborsClassifier with n=%d" % n] = KNeighborsClassifier(n_neighbors=n)
def load_prepared_data():
df_train = pd.read_csv('./data/output/processed_train.csv', header=0)
df_test = pd.read_csv('./data/output/processed_test.csv', header=0)
features = list(set(df_train.columns) - {'Vote'})
target = 'Vote'
df_train_X = df_train[features]
df_train_Y = df_train[target]
df_test_X = df_test[features]
df_test_Y = df_test[target]
# labels = {"0":"Blues","1":"Browns","2":"Greens","3":"Greys","4":"Oranges","5":"Pinks","6":"Purples","7":"Reds","8":"Whites","9":"Yellows" }
labels = ["Blues", "Browns", "Greens", "Greys", "Oranges", "Pinks", "Purples", "Reds", "Whites", "Yellows"]
return df_train_X, df_train_Y, df_test_X, df_test_Y, labels
def evaluate_and_get_best(features, target):
max_model = "linearSVC crammer_singer"
max_score = 0
for k, v in models.iteritems():
scores = cross_val_score(v, features, target, cv=15)
score = np.mean(scores)
print "%s - Score: %f" % (k, score)
if score > max_score:
max_score = score
max_model = k
return max_model
def main():
df_train_X, df_train_Y, df_test_X, df_test_Y, labels = load_prepared_data()
train_val_data = pd.concat([df_train_X])
features = train_val_data.values
target = pd.concat([df_train_Y]).values
add_parametrized_models()
best_model_name = evaluate_and_get_best(features, target)
clf = clone(models[best_model_name])
    # Refit the winning model on the training data before predicting on the test set
    clf.fit(features, target)
print "#######################"
print "Prediction"
print "#######################"
pred = clf.predict(df_test_X)
distribution = np.bincount(pred.astype('int64'))
for index, party in enumerate(distribution):
print "%s, %f, %f" % (labels[index], distribution[index], distribution[index] / float(df_test_Y.size) * 100) + '%'
if __name__ == '__main__':
main()
|
[
"BorisBorshevsky@gmail.com"
] |
BorisBorshevsky@gmail.com
|
b089edef3519feb7f892bdd66d7ebb57fe321c27
|
d214b72b3ae340d288c683afe356de6846a9b09d
|
/动态规划/最大矩形_85.py
|
d5fa9f35ee7dab90956eab9b4c2c0e9f34d1993c
|
[] |
no_license
|
Xiaoctw/LeetCode1_python
|
540af6402e82b3221dad8648bbdcce44954a9832
|
b2228230c90d7c91b0a40399fa631520c290b61d
|
refs/heads/master
| 2021-08-29T15:02:37.786181
| 2021-08-22T11:12:07
| 2021-08-22T11:12:07
| 168,444,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
from typing import *
class Solution:
    def maximalRectangle(self, matrix: List[List[str]]) -> int:
        if not matrix or not matrix[0]:
            return 0
        m, n = len(matrix), len(matrix[0])
        # heights[j]: number of consecutive '1's ending at the current row in column j
        heights = [0] * n
        ans = 0
        for i in range(m):
            for j in range(n):
                heights[j] = heights[j] + 1 if matrix[i][j] == '1' else 0
            # Largest rectangle in the histogram `heights` via a monotonic stack;
            # a sentinel height of 0 at j == n flushes the remaining bars.
            stack = []
            for j in range(n + 1):
                h = heights[j] if j < n else 0
                while stack and heights[stack[-1]] >= h:
                    top = stack.pop()
                    width = j - stack[-1] - 1 if stack else j
                    ans = max(ans, heights[top] * width)
                stack.append(j)
        return ans
if __name__ == '__main__':
matrix = [["1", "0", "1", "0", "0"], ["1", "0", "1", "1", "1"], ["1", "1", "1", "1", "1"],
["1", "0", "0", "1", "0"]]
sol=Solution()
print(sol.maximalRectangle(matrix))
|
[
"m18846183092@163.com"
] |
m18846183092@163.com
|
92d91153f08b294489b5212a168455b19ff4682c
|
cd8ca699f1f7ba14f731db76acc025ad97de01fe
|
/ref_sys/oper/migrations/0002_alter_invite_status.py
|
b5418854527b652a7a9dbde1148e21a3edcbc8d4
|
[] |
no_license
|
valbayzak/ref_system
|
8784fef3e85683208bed9a0bf3ae7bd632f44146
|
67fc708d5f28ed2c007a825faa17230460e05481
|
refs/heads/main
| 2023-06-03T18:22:01.572896
| 2021-06-21T16:53:17
| 2021-06-21T16:53:17
| 378,987,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# Generated by Django 3.2.4 on 2021-06-21 14:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oper', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='invite',
name='status',
field=models.PositiveSmallIntegerField(choices=[(1, 'ACTIVE'), (2, 'NOT_ACTIVE'), (3, 'ACCEPTED')], default=1, verbose_name='Статус'),
),
]
|
[
"85289417+vallbay@users.noreply.github.com"
] |
85289417+vallbay@users.noreply.github.com
|
42860a6c8058042cf8d6eefda4f3cc8887a54477
|
b7ffa11d72642c5b547f4a48307401cbd379cc43
|
/src/cart/urls.py
|
5948162b0be5bb65c606f441ca98da30f69d521e
|
[] |
no_license
|
gummigun/captainconsole-git
|
8d5a27d042c8a1fe4fa3bf7d89d45ce871ab4eac
|
886e9a86052684256a5473495759996894b261ce
|
refs/heads/master
| 2023-08-14T23:00:02.699957
| 2020-05-15T23:23:44
| 2020-05-15T23:23:44
| 259,331,163
| 0
| 0
| null | 2021-09-22T19:02:10
| 2020-04-27T13:32:13
|
Python
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
from django.urls import path, include, re_path
from . import views
urlpatterns = [
# Home page url pattern
path('', views.index, name="cart"),
path('add/<int:id>', views.update_cart, name="update_cart"),
path('remove/<int:id>', views.remove_cart, name="remove_cart"),
path('review/', views.review, name="review"),
path('checkout/', views.checkout, name="checkout"),
path('process/', views.process, name="process")
#re_path(r'^add/[0-9]$', views.update_cart, name="update_cart"),
]
|
[
"gummi.gunnlaugsson@gmail.com"
] |
gummi.gunnlaugsson@gmail.com
|
c96e3d5f4930ba27639b6713431e4463fe902921
|
74e516e50d5f7181d0ef340f0941e8ffc7b20022
|
/1-100/6/sum_square.py
|
872f7bf384605bbeae6bf4eb7891d10f3711eb67
|
[] |
no_license
|
js837/project-euler
|
fb261a8fc2898f4e86bb66ad8c119c961a8178a6
|
8b32fdbdfda13cf7c8881b400c6ce59334749dad
|
refs/heads/master
| 2016-09-05T16:38:09.639874
| 2015-04-21T21:10:17
| 2015-04-21T21:10:17
| 34,351,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
# Sum of squares formula: n*(n+1)*(2*n+1)/6
n = 100
sum_squares = n*(n+1)*(2*n+1)//6
square_sum = n**2*(n+1)**2//4
print(square_sum - sum_squares)
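# Sanity check (added; not in the original): brute-force sums agree with the
# closed-form results above.
assert sum_squares == sum(i*i for i in range(n + 1))
assert square_sum == sum(range(n + 1))**2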
|
[
"stacey.jake@gmail.com"
] |
stacey.jake@gmail.com
|
1fab3a455107a4685915923d7fdca0125d651eae
|
f7b47ac437f2c60c99fa004a5f11563cc2340c98
|
/migrations/versions/ba949b44fadf_.py
|
6caacd88df68bf1244959e34581efd7d76baafd4
|
[] |
no_license
|
QYJiua/myblogproject
|
23586970b9b8ccdf7aa4a931adde1a7a4e04a673
|
0a6749306ca74bb2d7d1f876c03e945a259c0909
|
refs/heads/master
| 2023-07-29T22:27:07.373173
| 2021-09-16T14:43:29
| 2021-09-16T14:43:29
| 407,197,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
"""empty message
Revision ID: ba949b44fadf
Revises: 7afb21f57fac
Create Date: 2021-08-28 22:06:03.824638
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'ba949b44fadf'
down_revision = '7afb21f57fac'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('article_type',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('type_name', sa.String(length=20), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comment',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('article_id', sa.Integer(), nullable=True),
sa.Column('comment', sa.String(length=255), nullable=False),
sa.Column('cdatetime', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['article_id'], ['article.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('article', sa.Column('type_id', sa.Integer(), nullable=True))
op.alter_column('article', 'user_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=True)
op.create_foreign_key(None, 'article', 'article_type', ['type_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'article', type_='foreignkey')
op.alter_column('article', 'user_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False)
op.drop_column('article', 'type_id')
op.drop_table('comment')
op.drop_table('article_type')
# ### end Alembic commands ###
|
[
"1273884908@qq.com"
] |
1273884908@qq.com
|
1f57a94143af972a289bfc920a65f67f1bd6adf6
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_17171.py
|
41ccf0e53304977e412d274d7efa76fe1482d35e
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
# Integer to Boolean: wrapped in an illustrative helper so the fragment runs
def ints_differ(a, b):
    return a != b
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
a519de5be7a9fc0af870c2e10b69c7887da9f26a
|
cf97a7de6fad3c917356a0c7fb75bda1b4e31981
|
/unravel/text/legal/glossary.py
|
e866b388acfda0220997586ac84c01a56dd5d2cf
|
[
"Apache-2.0"
] |
permissive
|
unravel-text/unravel
|
a2614de23e0676d5b7027d2e397ee39a0d9942e4
|
d819b90bfd1e4c0dd3157f43595fdbb38ae82d50
|
refs/heads/master
| 2023-06-26T13:52:33.611158
| 2023-06-17T03:41:41
| 2023-06-17T03:41:41
| 155,546,598
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
class Glossary:
pass
|
[
"cofiem@gmail.com"
] |
cofiem@gmail.com
|
ed0d0eca931fce65500d604ab0cbc8aa1dbd612c
|
227a045665ea8c5b1822bed84c38b990a1343770
|
/libs/sms.py
|
f6376bdddca1492090f0f94c2f3fc68aebad1f1a
|
[
"Apache-2.0"
] |
permissive
|
theEndProject/aboutTa
|
f31a1629afdf7fc157e219b2499b47c2c2181e98
|
33a1e391e56e76006ee6ef3d9102efc496251cb6
|
refs/heads/main
| 2023-01-02T01:31:10.635058
| 2020-10-26T09:33:05
| 2020-10-26T09:33:05
| 303,625,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
import time
import json
from hashlib import md5
import requests
from aboutTa import config as cfg
def send_sms(phonenum, vcode):
    '''Send an SMS verification code'''
args = {
'appid': cfg.SD_APPID, # APPID
        'to': phonenum,  # recipient phone number
        'project': cfg.SD_PROJECT,  # SMS template ID
'vars': json.dumps({'code': vcode}),
'timestamp': int(time.time()),
'sign_type': cfg.SD_SIGN_TYPE,
}
    # Compute the signature over the request parameters
    sorted_args = sorted(args.items())  # sort the items by key
    args_str = '&'.join([f'{key}={value}' for key, value in sorted_args])  # join into key=value pairs
    sign_str = f'{cfg.SD_APPID}{cfg.SD_APPKEY}{args_str}{cfg.SD_APPID}{cfg.SD_APPKEY}'  # string to be signed
    sign_bytes = sign_str.encode('utf8')  # encode to bytes
    signature = md5(sign_bytes).hexdigest()  # MD5 digest is the signature
args['signature'] = signature
response = requests.post(cfg.SD_API, data=args)
if response.status_code == 200:
result = response.json()
        print('SMS result:', result)
if result.get('status') == 'success':
return True
return False
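# Illustrative usage (not part of the original module; the number is made up):
#
#     if send_sms('13800138000', '123456'):
#         print('verification code sent')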
|
[
"472437593@qq.com"
] |
472437593@qq.com
|
cd42b43018616531062a56884c91ab6fd2f1ea44
|
cc52dc8b6295c9617ae8df40d0dbe9a062f0d7de
|
/dinerindex.py
|
44a69ab9afc22b4c8821608ed28a80df8bd6729d
|
[] |
no_license
|
mariopedregon/python-diner-bottega
|
f98cbcae3f67145b0a163666eb40ceebf91bdcfe
|
43829fed3cf982925c45c62932b839ccc5e30c22
|
refs/heads/master
| 2020-05-23T06:13:59.391487
| 2019-05-14T16:45:29
| 2019-05-14T16:45:29
| 186,662,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,257
|
py
|
def main():
    totalCost = 0
    print('''
    "Welcome to the Bottega Diner , What can we get you started today!"
    ''')
    name = input("What is your name?")
    print("Hello " + name + "!")
    print('''
    You get one entree and two side choices at regular cost.
    ''')
    print('''
    Here is our menu!
    ''')
    mainMenu(totalCost)

# Each item helper returns its posted menu price; the menus accumulate the
# total, so nothing is double-counted and the bill matches the menu.
def mainMenu(totalCost):
    print("1.Steak $13.99")
    print("2.Chicken $11.99")
    print("3.Ribs $15.99")
    selection = int(input("Enter Choice:"))
    print("\n")
    if selection == 1:
        totalCost += Steak()
    elif selection == 2:
        totalCost += Chicken()
    elif selection == 3:
        totalCost += Ribs()
    else:
        print("Invalid choice. enter 1-3")
        mainMenu(totalCost)
        return
    totalCost = sideMenu(totalCost)
    print("your total is $" + str(totalCost))

def Steak():
    print("Great choice!")
    return 13.99

def Chicken():
    print("Great choice!")
    return 11.99

def Ribs():
    print("Great Choice")
    return 15.99

def sideMenu(totalCost):
    print("1.corn on the cob $10.50")
    print("2.house salad $7.50")
    print("3.Fries $3")
    selection = int(input("Enter Choice:"))
    if selection == 1:
        totalCost += corn()
    elif selection == 2:
        totalCost += house()
    elif selection == 3:
        totalCost += fries()
    else:
        print("Invalid choice. enter 1-3")
        return sideMenu(totalCost)
    return totalCost

def corn():
    print("That'll be $10.50.")
    return 10.5

def house():
    print("That'll be $7.50")
    return 7.5

def fries():
    print("Sweet!")
    return 3

main()
|
[
"mariopedregon93@gmail.com"
] |
mariopedregon93@gmail.com
|
51b331eccb9809f73598d78540ae4e6af635ee6f
|
f83839dfcbd8bfd5c535f819cf9bb9c303ff3f94
|
/MachineBookExtract/book_tools/characters_tool.py
|
8dce0f703472fc5e254302b519d10a0b74df7fbc
|
[] |
no_license
|
pawo97/MachineBookExtract
|
8cabc4c18a65022c1b61a6bd9e9f47eb60753401
|
33fab023b7b0e48e5fe043f504269d80cb964237
|
refs/heads/main
| 2023-07-25T01:45:43.711512
| 2021-09-06T21:11:32
| 2021-09-06T21:11:32
| 345,401,332
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,519
|
py
|
import traceback
from book_tools.characters_person_rate import characters_person_rate
class characters_tool:
def get_list_non_alpha_numeric(self, words):
non_alpha_words = []
for w in words:
# blank
if w != '':
alphanumeric = ""
for character in w:
if character.isalnum():
alphanumeric += character
non_alpha_words.append(alphanumeric.lower())
return list(dict.fromkeys(non_alpha_words))
def get_second_word(self, li):
li_second_word = []
for i in li:
if ' ' in i:
j = i.split(' ')
if len(j) >= 2:
li_second_word.append(j[1])
else:
li_second_word.append(i)
return li_second_word
def get_words_with_prefix(self, nouns):
prefix_list = []
for s in nouns:
s_final = ''
if (s.startswith('a ') or s.startswith('A ')) and s[2].isupper() and 'CHAPTER' not in s:
s_final = s[2:]
# print("LL", s[2:])
prefix_list.append(s_final)
elif (s.startswith('the ') or s.startswith('The ')) and s[4].isupper() and 'CHAPTER' not in s:
s_final = s[4:]
# print("LL", s[4:])
prefix_list.append(s_final)
return prefix_list
def get_persons_no_duplicates(self, doc):
persons = []
for entity in doc.ents:
if entity.label_ == 'PERSON':
if entity.text[0].isupper():
persons.append(entity.text)
return list(dict.fromkeys(persons))
def get_last_word(self, persons):
new_persons = []
for i in persons:
i = i.replace('\n', ' ')
if ' ' in i:
j = i.split(' ')
if len(j) >= 2:
new_persons.append(j[len(j) - 1])
else:
new_persons.append(i)
return new_persons
def remove_dot_s(self, persons):
new_persons = []
for w in persons:
if w.endswith("’s"):
w = w[0:len(w) - 2]
new_persons.append(w)
return new_persons
def check_spacy_tags(self, nlp, words_selected, persons):
# Create rating list
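        # Keep only noun-like tokens (spaCy tags NN/NNS/NNP); plurals (NNS)
        # are singularized by dropping the trailing 's' before counting.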
person_rates = []
for p in persons:
if p != 'the' and p != 'a' and len(p) > 1:
# check spacy tag
doc = nlp(p)
if 'NN' == doc[0].tag_:
person = characters_person_rate()
person.rate = 0
person.word = p
person.tag = doc[0].tag_
person_rates.append(person)
elif 'NNS' == doc[0].tag_:
person = characters_person_rate()
person.rate = 0
person.word = p[0:len(p) - 1]
person.tag = doc[0].tag_
person_rates.append(person)
elif 'NNP' == doc[0].tag_:
person = characters_person_rate()
person.rate = 0
person.word = p
person.tag = doc[0].tag_
person_rates.append(person)
# Count in words
for w in words_selected:
for p in person_rates:
if p.word in w or p.word == w:
p.rate += 1
person_rates.sort(key=lambda x: x.rate, reverse=True)
person_rates = list(dict.fromkeys(person_rates))
return person_rates
def capital_letter_and_not_empty_str_list(self, persons):
del persons[30:]
# capital letter
for i in range(len(persons)):
persons[i] = persons[i].title()
# delete empty strings
final_person = []
for i in range(len(persons)):
if persons[i] != '' and len(persons[i]) > 2:
final_person.append(persons[i])
return final_person
    def sum_lists_rates(self, one, two, three):
        # Merge the three candidate lists case-insensitively: names from `one`
        # seed the dict, and every further occurrence raises the rate.
        d = {}
        for i in one:
            d[i.lower()] = 0
        for li in (two, three):
            for i in li:
                key = i.lower()
                if key not in d:
                    d[key] = 0
                else:
                    d[key] += 1
        d = list(dict(sorted(d.items(), key=lambda item: item[1], reverse=True)).keys())
        return d
def get_characters(self, words, doc, nlp):
try:
words_selected = self.get_list_non_alpha_numeric(words)
# ==================================================================== GET BY TAGS
nouns = [chunk.text for chunk in doc.noun_chunks]
a_the_lists = self.get_words_with_prefix(nouns)
second_words_list = self.get_second_word(a_the_lists)
li_not_alpha = self.get_list_non_alpha_numeric(second_words_list)
li_not_alpha_duplicates = list(dict.fromkeys(li_not_alpha))
# ==================================================================== GET BY WORDS
persons = self.get_persons_no_duplicates(doc)
li_not_space = self.get_last_word(persons)
li_dot_s = self.remove_dot_s(li_not_space)
persons_result_list = self.get_list_non_alpha_numeric(li_dot_s)
# ==================================================================== RATING PERSONS
li_persons = list(dict.fromkeys(persons_result_list))
# Create rating list
li_person_rate = self.check_spacy_tags(nlp, words_selected, li_persons)
del li_person_rate[30:]
li_persons = []
for p in li_person_rate:
li_persons.append(str(p.word))
# ==================================================================== SUM RESULTS
persons_result_list = self.capital_letter_and_not_empty_str_list(persons_result_list)
# sum and the biggest values from three lists
d = self.sum_lists_rates(persons_result_list, li_persons, li_not_alpha_duplicates)
# capitalize first letter
final_list = self.capital_letter_and_not_empty_str_list(d)
except Exception as e:
print(traceback.format_exc())
final_list = []
return final_list
|
[
"pa-wo97@o2.pl"
] |
pa-wo97@o2.pl
|
19907e7cb61cd025d174242e51357e774a777801
|
d257ddf7e6959d0989d76080a8a048e82393657f
|
/002_TemplateMatching/002_template_match_implemented.py
|
112464bcd0690858ab97442b59d77b3d552eca7f
|
[
"MIT"
] |
permissive
|
remichartier/027_selfDrivingCarND_ObjectDetectionExercises
|
d210f37b7baf306dd034c09f62e125b263f8270d
|
ccd853c975d35df5f31e1a445a1a8757b8bd13f5
|
refs/heads/main
| 2023-04-17T08:09:55.465143
| 2021-05-03T07:11:16
| 2021-05-03T07:11:16
| 362,013,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# All the 6 methods for comparison in a list
# methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
# 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns the a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
# Make a copy of the image to draw on
imcopy = np.copy(img)
# Define an empty list to take bbox coords
bbox_list = []
# Iterate through template list
for temp in template_list:
# Read in templates one by one
templ = mpimg.imread(temp)
print(templ.shape[::-1])
l, w, h = templ.shape[::-1]
# Use cv2.matchTemplate() to search the image
# using whichever of the OpenCV search methods you prefer
meth = 'cv2.TM_CCOEFF' # --> Working
#meth = 'cv2.TM_CCOEFF_NORMED' # --> Working
#meth = 'cv2.TM_CCORR' # --> Not working
#meth = 'cv2.TM_CCORR_NORMED' # --> Working
#meth = 'cv2.TM_SQDIFF' # --> Not working
#meth = 'cv2.TM_SQDIFF_NORMED' # --> Not working
method = eval(meth)
res = cv2.matchTemplate(img,templ,method)
# Use cv2.minMaxLoc() to extract the location of the best match
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# Determine bounding box corners for the match
        # Compare against the evaluated constant (the string `meth` would never match)
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left,bottom_right))
# Return the list of bounding boxes
return bbox_list
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
|
[
"remipr.chartier@gmail.com"
] |
remipr.chartier@gmail.com
|
76958178b7438bb05a58d4bf3edd04bf9ee28403
|
cc212540f928a95fa56f4679e3eb58e2ad329ca5
|
/annpy/training/trainer.py
|
c93d497850a77427e0a1ba0888254a24da4a10e7
|
[
"LicenseRef-scancode-mit-taylor-variant",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
nirvguy/annpy
|
ec05e07316bddd4bc5fbbd3d9e73ec94dc52a4b9
|
ea5f92048173d0ebd1ad134cf626fa623569905e
|
refs/heads/master
| 2018-06-03T06:11:21.911758
| 2018-05-30T16:16:46
| 2018-05-30T16:16:48
| 118,555,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
# See LICENSE file for copyright and license details.
import torch
class Trainer(object):
def __init__(self, learning_rule):
self._learning_rule = learning_rule
self._epoch = 0
self._hooks = []
self._remaining_epochs = 0
@property
def epoch(self):
return self._epoch
@staticmethod
def check_batch(batch):
        if not isinstance(batch, torch.Tensor):
            raise Exception("Batches must be torch.Tensors")
        if len(batch.shape) <= 1:
            raise Exception("Batch shape must have at least two dimensions")
def _notify(self, msg):
for hook in self._hooks:
hook.notify(msg)
def train(self, batchs, epochs=1):
if len(batchs) == 0:
return
for batch in batchs:
self.check_batch(batch)
self._remaining_epochs = epochs
self._notify('pre_training')
for _ in range(epochs):
self._notify('pre_epoch')
for batch in batchs:
self._learning_rule.step(batch)
self._epoch += 1
self._remaining_epochs -= 1
self._notify('post_epoch')
self._notify('post_training')
def remaining_epochs(self):
return self._remaining_epochs
def attach(self, hook):
self._hooks.append(hook)
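# Minimal usage sketch (illustrative; `learning_rule` and `batchs` are
# placeholders, not defined in this module): any object exposing notify(msg)
# can observe training progress.
#
#     class PrintHook:
#         def notify(self, msg):
#             print(msg)
#
#     trainer = Trainer(learning_rule)
#     trainer.attach(PrintHook())
#     trainer.train(batchs, epochs=10)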
|
[
"nirvguy@gmail.com"
] |
nirvguy@gmail.com
|
41496b02cff46275f641d32d95dd21b748f7f1e3
|
08ff60b74fe11b8aa3d01e69de58a12d44aa6c0b
|
/webServer/webServer.py
|
0a08cd1e39bef63318e3ee77a5247383260cec21
|
[] |
no_license
|
jim-stickney/aquariumController
|
cee7dc477ff6db64adce91911d90b0158f9b31c1
|
a3c4de39fafe21a209c4eeae4a7d4712b9e51eb6
|
refs/heads/master
| 2016-09-06T09:35:27.389195
| 2016-02-27T19:13:52
| 2016-02-27T19:13:52
| 33,930,196
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
from flask import Flask, render_template, Markup
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
plt.ioff()
from dateutil import parser
import mpld3
import Pyro4
app = Flask(__name__)
data_logger = Pyro4.Proxy('PYRONAME:data.logger')
@app.route("/")
def hello():
times = []
temps = []
data = data_logger.getData()
if 'avgTemps' in data.keys():
temps += data['avgTemps']
for t in data['avgTimes']:
times.append(parser.parse(t))
if 'temps' in data.keys():
temps += data['temps']
for t in data['time']:
times.append(parser.parse(t))
temps = numpy.array(temps)
fig, ax = plt.subplots()
ax.cla()
ax.plot_date(times, temps, '-')
ax.set_xlabel("Time")
ax.set_ylabel("Temperature (in deg F)")
fig1, ax1 = plt.subplots()
ax1.cla()
nT = len(data['thermostatTime1'])
thermostatTimes = [0]*(nT+1)
thermostatState = numpy.zeros(nT+1)
for iii in range(nT):
thermostatState[iii] = int(data['thermostatState1'][iii] )
thermostatTimes[iii] = parser.parse(data['thermostatTime1'][iii])
thermostatTimes[-1] = datetime.datetime.now()
thermostatState[-1] = thermostatState[-2]
ax1.plot_date(thermostatTimes, thermostatState, '-')
nT = len(data['thermostatTime15'])
thermostatTimes = [0]*(nT+1)
thermostatState = numpy.zeros(nT+1)
for iii in range(nT):
thermostatState[iii] = int(data['thermostatState15'][iii] )
thermostatTimes[iii] = parser.parse(data['thermostatTime15'][iii])
thermostatTimes[-1] = datetime.datetime.now()
thermostatState[-1] = thermostatState[-2]
ax1.plot_date(thermostatTimes, thermostatState, '-')
ax1.set_xlabel("Time")
ax1.set_ylabel("Thermostat State")
nT = len(data['fillingTime0'])
fillingTimes = [0]*(nT+1)
fillingState = numpy.zeros(nT+1)
for iii in range(nT):
fillingState[iii] = int(data['fillingState0'][iii] )
fillingTimes[iii] = parser.parse(data['fillingTime0'][iii])
fillingTimes[-1] = datetime.datetime.now()
fillingState[-1] = fillingState[-2]
fig2, ax2 = plt.subplots()
ax2.cla()
ax2.plot_date(fillingTimes, fillingState, '-')
ax2.set_xlabel("Time")
ax2.set_ylabel("Filling State")
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
templateData = {'title': "My aquarium status",
'time': timeString,
'tempFigure' : Markup(mpld3.fig_to_html(fig)),
'thermostatFigure' : Markup(mpld3.fig_to_html(fig1)),
'fillingFigure' : Markup(mpld3.fig_to_html(fig2)),
}
return render_template('main.html', **templateData)
app.run(host='0.0.0.0', port=80, debug=True)
|
[
"jim.stickney@gmail.com"
] |
jim.stickney@gmail.com
|
f841e9e9170838ca8c2972ca807eedb0e4ecd954
|
e905abd9bb7bd7017657d0a0c4d724d16e37044c
|
/.history/article/settings_20210208181317.py
|
5959719e37fa4bb9dcbc2f1420a4a206f030284f
|
[] |
no_license
|
tabdelbari/articles
|
a8b921841f84fb473f5ed1cdcda743863e6bc246
|
f0e1dfdc9e818e43095933139b6379a232647898
|
refs/heads/main
| 2023-03-05T10:21:35.565767
| 2021-02-10T13:35:14
| 2021-02-10T13:35:14
| 325,654,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,437
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for article project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
MONGO_URI = 'mongodb://localhost:27017/'
MONGO_DATABASE = 'articles'
BOT_NAME = 'article'
SPIDER_MODULES = ['article.spiders']
NEWSPIDER_MODULE = 'article.spiders'
SPLASH_URL = 'http://localhost:8050'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'article (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 10
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 1
CONCURRENT_REQUESTS_PER_IP = 1
# Disable cookies (enabled by default)
COOKIES_ENABLED = True
COOKIES_DEBUG = True
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'article.pipelines.MongoPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"abdelbari1996@hotmail.com"
] |
abdelbari1996@hotmail.com
|
3f0ba0135eca1c3fa561a27e6ca70bbfd56d36ce
|
dbc35bdcec5f64ef8482c709a28e527c6b66d638
|
/company_admin/urls.py
|
d643efa64d0a8b4422c90ea707d9d82e77bb0054
|
[] |
no_license
|
PkQDark/Dosa
|
2bc001f1ab7c2e15ae2c8fb0f2309185024be590
|
bcead811892b2f0c06e2cb5e03cf3f98a0dc9b7b
|
refs/heads/master
| 2021-01-01T06:56:46.259731
| 2017-09-19T17:29:43
| 2017-09-19T17:29:43
| 97,556,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
from django.conf.urls import url
from .views import dosing, \
cistern_add, cistern_list, cistern_edit, cistern_info, \
add_fuel, fuels_list, fuel_info,\
keys, edit_key, \
users, add_local_user, edit_local_user
urlpatterns = [
url(r'^$', dosing, name='dosing'),
url(r'^cisterns/$', cistern_list, name='cistern_list'),
url(r'^cisterns/add-cistern/$', cistern_add, name='cistern_add'),
url(r'^cisterns/edit/(?P<cist_id>\d+)/', cistern_edit, name='cist_edit'),
url(r'^cisterns/(?P<cist_id>\d+)/', cistern_info, name='cist_info'),
url(r'^fuels/$', fuels_list, name='fuels'),
url(r'^fuels/add-fuel/$', add_fuel, name='add_fuel'),
url(r'^fuels/(?P<fuel_id>\d+)/', fuel_info, name='fuel_info'),
url(r'^keys/$', keys, name='keys'),
url(r'^keys/edit/(?P<key_id>\d+)/', edit_key, name='edit_key'),
url(r'^users/$', users, name='users'),
url(r'^users/add-user/$', add_local_user, name='add_user'),
url(r'^users/edit/(?P<user_id>\d+)/', edit_local_user, name='edit_user'),
]
|
[
"blackbirdvlad@gmail.com"
] |
blackbirdvlad@gmail.com
|
9bd93aab5d5388ddaa38608af37c4a2b0e8f8509
|
d46db380847d51cea1966ba514d856f22251019e
|
/app/core/migrations/0001_initial.py
|
0a6c4c2dffc565e27135176e19bd4e4b03b6e1ba
|
[
"MIT"
] |
permissive
|
bilesanmiahmad/recipe-app-api
|
bc3b2004146ed46bbaf427947db63c8215c3230a
|
b292c0212627513bc62eb48cc187bfb6c5fd0aed
|
refs/heads/master
| 2022-12-22T10:08:15.240112
| 2020-10-01T09:48:27
| 2020-10-01T09:48:27
| 294,135,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
# Generated by Django 3.1.1 on 2020-10-01 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"fbilesanmi@gmail.com"
] |
fbilesanmi@gmail.com
|
8f885274db507628a34e8f8f094526a25c935972
|
cc9d1aeb8aefe3d4f86c94b4279a64e70bf5fd80
|
/setup.py
|
be0365371238e8e2c7a86eb0bd4aa3c81f749446
|
[
"MIT"
] |
permissive
|
sdelquin/sendgrify
|
a520a2da7d6c6d7c4707c325f6d67523e53803eb
|
fe8ee1d0efd0c8d8034d1c57cfc07672f77d7e8e
|
refs/heads/main
| 2023-06-11T15:49:27.284693
| 2023-05-28T12:54:34
| 2023-05-28T12:54:34
| 342,843,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# read the contents of your README file
from pathlib import Path
from setuptools import setup
this_directory = Path(__file__).parent
long_description = (this_directory / 'README.md').read_text()
REQUIREMENTS = (
'sendgrid==5.3.0',
'markdown',
)
setup(
name='sendgrify',
version='2.0.3',
url='https://github.com/sdelquin/sendgrify.git',
author='Sergio Delgado Quintero',
author_email='sdelquin@gmail.com',
description='SendGrid for Humans',
license='MIT',
packages=['sendgrify'],
install_requires=REQUIREMENTS,
long_description=long_description,
long_description_content_type='text/markdown',
)
|
[
"sdelquin@gmail.com"
] |
sdelquin@gmail.com
|
fde156d69337a167c10dea149b053022dba9878a
|
bd38b6be261e997e1a34694b70f3e9fa22e73c8e
|
/StatMyBallsApi/migrations/0003_goal_goal_date.py
|
a065c4260cca8911bebe0264d213452e776fbc15
|
[] |
no_license
|
Strapontin/StatMyBallsDjango
|
05e73a502a8db8bdeeeef7533a1a3514773261b4
|
8082b2630a2ddf4dded999636c8fd39b0fb65b0a
|
refs/heads/main
| 2023-04-27T17:17:12.815401
| 2021-05-16T19:27:32
| 2021-05-16T19:27:32
| 305,686,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# Generated by Django 3.1.3 on 2020-11-11 10:07
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('StatMyBallsApi', '0002_auto_20201111_1058'),
]
operations = [
migrations.AddField(
model_name='goal',
name='goal_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='goal_date'),
preserve_default=False,
),
]
|
[
"cedric.hernandez.soto@gmail.com"
] |
cedric.hernandez.soto@gmail.com
|
0085fd62735222af905666b50be03358a0c3f7ec
|
1b52887970b2ed95e73b950862a050b58fa7269d
|
/network/core_net.py
|
c17213c039d4648e74e04ed41b5518b5566d0c86
|
[] |
no_license
|
FreescaleFlyaway/lizard
|
1516ff009f08a742ad835134f4278202a9714355
|
3db9d49cb45ff13f295f77fa592467cf793611c9
|
refs/heads/master
| 2020-05-19T10:12:13.315447
| 2018-03-26T08:36:43
| 2018-03-26T08:36:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,906
|
py
|
# $File: core_net.py
# $Author: Harvey Chang
import tensorflow as tf
import numpy as np
def actor_net(obs_ph, act_dim, suppress_ratio=1.0):
with tf.variable_scope('actor'):
obs_dim = obs_ph.shape.as_list()[-1] # the last dim of shape
hid1_size = obs_dim * 10
hid3_size = act_dim * 10
hid2_size = int(np.sqrt(hid1_size * hid3_size))
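        # Layer widths taper geometrically: 10x the observation width down to
        # 10x the action width, with the middle layer at their geometric mean.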
# hidden net:
out = tf.layers.dense(obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1/obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1/hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1/hid2_size)), name="h3")
means = tf.layers.dense(out, act_dim, tf.tanh, kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio*np.sqrt(1 / hid3_size)), name='means')
# variance:
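        # A per-dimension log-variance vector is learned directly (initialized
        # around -2); exponentiating it yields the policy stddev.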
log_vars = tf.get_variable('logvars', [act_dim], tf.float32,
tf.random_normal_initializer(mean=-2, stddev=1.0/act_dim))
sigma_init = tf.variables_initializer([log_vars], 'sigma_initializer')
sigma = tf.exp(log_vars)
return means, sigma, sigma_init
def critic_net(obs_ph, suppress_ratio=1.0):
with tf.variable_scope('critic'):
obs_dim = obs_ph.shape.as_list()[-1]
hid1_size = obs_dim * 10
hid3_size = 10
hid2_size = int(np.sqrt(hid1_size * hid3_size))
out = tf.layers.dense(obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / hid2_size)), name="h3")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=suppress_ratio * np.sqrt(1 / hid3_size)), name='output')
out = tf.squeeze(out)
return out
def activate_net(obs_ph):
with tf.variable_scope('activate'):
obs_dim = obs_ph.shape.as_list()[-1]
hid1_size = obs_dim * 10
hid3_size = 10
hid2_size = int(np.sqrt(hid1_size * hid3_size))
out = tf.layers.dense(obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid2_size)), name="h3")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid3_size)), name='output')
out = tf.squeeze(out)
return out
|
[
"zhengwxthu@163.com"
] |
zhengwxthu@163.com
|
77c55d04b0a750c8b0c0dc571cf5927a6d78e179
|
356f3f1b7caf0ccb20cc830d40821dfb2cbda046
|
/sfit/sfit/doctype/items/items.py
|
c1943c13dec9e21c63e99267eb3e87e7de102726
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/sfit
|
f4b75b9a8b2de08d0eaa4eadbcd3d5e432ffba56
|
a96afbf35b0e1635e44cb5f83d7f86c83abedb8f
|
refs/heads/master
| 2021-09-05T18:22:43.494208
| 2018-01-30T07:23:02
| 2018-01-30T07:23:02
| 104,332,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Items(Document):
pass
|
[
"vigneshwaran@valiantsystems.com"
] |
vigneshwaran@valiantsystems.com
|
c42d909697d0db5a72ae51a3c5d635841a1787f8
|
a8fca7b6bc1f0eeaba12b682a81d880dc71cc929
|
/FlaskEndpoint/tests/system/test_home.py
|
38225c4925d80136cac8cbc7e3a04b5a0ac7ca4e
|
[] |
no_license
|
sineczek/Automated-Software-Testing-with-Python
|
cb74d8714ad5b2ec9a6ffc013a400f0181f8095b
|
2e7c4ff4bb5acfd53afb43a4bfa7191eb58a899c
|
refs/heads/main
| 2023-04-14T08:15:53.917614
| 2021-04-24T17:18:23
| 2021-04-24T17:18:23
| 345,342,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
from tests.system.base_test import BaseTest
import json
class TestHome(BaseTest):
def test_home(self):
with self.app() as c:
resp = c.get('/')
self.assertEqual(
resp.status_code, 200
)
self.assertEqual(
                json.loads(resp.get_data()),  # loads parses the string into JSON, i.e. a dict
{'message': 'Hello, world!'}
)
|
[
"michalzaitz@gmail.com"
] |
michalzaitz@gmail.com
|
d8a49d368a82b0008bacdd568c57aa745bde3133
|
d86ed2c37a55b4a3118131a04f9a68dbd3b51a7f
|
/sherpatest/lev3fft-bar.py
|
384437c626b0fbb39addb9d1c5274f6e57b5fd62
|
[] |
no_license
|
hamogu/sherpa-test-data
|
f745cc907c2535a721d46472b33f7281bd6e6711
|
77d9fc563875c59a4acff2960d46180ee7a8ec14
|
refs/heads/master
| 2023-06-18T22:30:44.947033
| 2020-08-03T12:07:13
| 2020-08-03T12:07:13
| 275,202,255
| 0
| 0
| null | 2020-06-26T16:38:19
| 2020-06-26T16:38:19
| null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
#!/usr/bin/env python
from sherpa.astro.ui import *
image_file = "acisf07999_000N001_r0035_regevt3_srcimg.fits"
psf_file = "acisf07999_000N001_r0035b_psf3.fits"
reg_file = "ellipse(3145.8947368421,4520.7894736842,37.0615234375,15.3881587982,92.2273254395)"
srcid = 1
load_data(srcid, image_file)
load_psf("psf%i" % srcid, psf_file)
set_psf(srcid, "psf%i" % srcid)
set_coord(srcid, "physical")
notice2d_id(srcid, reg_file)
# Switch to WCS for fitting
set_coord(srcid, "wcs")
# Use Nelder-Mead, C-statistic as fit method, statistic
set_method("neldermead")
set_stat("cstat")
set_source(srcid, 'gauss2d.src + const2d.bkg')
guess(srcid, src)
image_file = "acisf08478_000N001_r0043_regevt3_srcimg.fits"
psf_file = "acisf08478_000N001_r0043b_psf3.fits"
reg_file = "ellipse(3144.5238095238,4518.8095238095,25.2978591919,19.1118583679,42.9872131348)"
srcid = 2
load_data(srcid, image_file)
load_psf("psf%i" % srcid, psf_file)
set_psf(srcid, "psf%i" % srcid)
set_coord(srcid, "physical")
notice2d_id(srcid, reg_file)
# Switch to WCS for fitting
set_coord(srcid, "wcs")
# Use Nelder-Mead, C-statistic as fit method, statistic
set_method("neldermead")
set_stat("cstat")
set_source(srcid, 'gauss2d.src + const2d.bkg')
guess(srcid, src)
fit()
|
[
"olaurino@cfa.harvard.edu"
] |
olaurino@cfa.harvard.edu
|
fa09f73820c428ce2463a685f915692ae007b9f1
|
782160673937ccf69c809e1ed3edca6d08bbc171
|
/Chapter5/5-9.py
|
667e06267d49334e52d8fc311668b4d42a1e84dd
|
[] |
no_license
|
ParkEunbin/17.02-Python
|
6549f4fd10fe366f100082dd3834135aef584562
|
8e3831d8a911263ddd7b9f83bb26bcc79e34efb0
|
refs/heads/master
| 2021-06-18T08:03:07.099654
| 2021-01-20T05:38:10
| 2021-01-20T05:38:10
| 158,062,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# Fruit price calculation
fruit = {"apple": 1500, "banana": 1300, "melon": 2000}
for a, b in fruit.items():
    print(a, b)
total = 0
for c in fruit.keys():
    print("Number of %s:" % c, end=" ")
    num = int(input())
    total += (fruit[c] * num)
print(total, "won")
|
[
"noreply@github.com"
] |
noreply@github.com
|
342d4a56a9680e0518979d48af56e27109bc1403
|
56c89d49b0b5fd567783f056637a312d81b187bd
|
/lab 3/a5.py
|
10240b377d5d4806ada151f46c5f9b54e8e4067c
|
[] |
no_license
|
vedeninvv/Algorithms
|
acdfb16721437a81d8b0c5abd5b5185abf45254d
|
8e4e0bbc9ebf872f44ebbe709b6045f453e42aee
|
refs/heads/master
| 2022-12-19T06:08:35.259425
| 2020-09-28T18:31:35
| 2020-09-28T18:31:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
fin = open("radixsort.in")
fout = open("radixsort.out", "w")
input = fin.readline().split()
n = int(input[0])
m = int(input[1])
k = int(input[2])
strings = []
for line in fin:
strings.append(line[0: len(line) - 1])
i = min(k, m)
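# LSD radix sort: run min(k, m) stable bucket passes, moving from the last
# character position toward the front (buckets are indexed from 'A', ord 65).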
while i > 0:
buf = [[] for j in range(57)]
for j in range(n):
buf[ord(strings[j][m - 1]) - 65].append(strings[j])
kol = 0
for j in range(57):
for el in buf[j]:
strings[kol] = el
kol += 1
i -= 1
m -= 1
for i in strings:
print(i, file=fout)
|
[
"vedeninvv@mail.ru"
] |
vedeninvv@mail.ru
|
07b351aef518fd66e7d562465c2c742ce426dfb8
|
f73d1fcf5ab749a703881971310518762c823713
|
/BarrettDylanRockPaperScissors/RPS.py
|
8824f750bb8b3436f96c8a008a04bf22e446583f
|
[] |
no_license
|
DylanBarrett/IT1040-Mizzou
|
42f3766158b1c30178f8004303062ea06b7026f8
|
64c7e07af83d45c79974e469d4225adbf145ae08
|
refs/heads/master
| 2020-03-26T15:33:24.407276
| 2018-08-16T23:52:42
| 2018-08-16T23:52:42
| 145,050,316
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,172
|
py
|
import random
import pickle
class GameStatus():
def __init__(self, name):
self.tie = 0
self.playerWon = 0
self.pcWon = 0
self.name = name
def get_round(self):
return self.tie + self.playerWon + self.pcWon + 1
# Displays program information, starts main play loop
def main():
print("Welcome to a game of Rock, Paper, Scissors!")
print("What would you like to choose?")
print("")
game_status = welcomemenu()
while True:
play(game_status)
endGameSelect(game_status)
def welcomemenu():
while True:
print("[1]: Start New Game")
print("[2]: Load Game")
print("[3]: Quit")
print("")
menuselect = int(input("Enter your choice: "))
if int(menuselect) in [1, 2, 3]:
break
else:
print("Wrong choice. select again.")
if menuselect == 1:
name = input("What is your name?: ")
print(("Hello %s.") % name)
print("Let's play!")
game_status = GameStatus(name)
elif menuselect == 2:
while True:
name = input("What is your name?: ")
try:
                user_file = open('%s.rsp' % name, 'rb')  # pickle files must be opened in binary mode
except IOError:
print(("Sorry there is no game found with name %s") % name)
continue
break
print(("Welcome back %s.") % name)
print("Let's play!")
game_status = pickle.load(user_file)
displayScoreBoard(game_status)
user_file.close()
    elif menuselect == 3:
        print("Bye!!!")
        exit()
    return game_status
def play(game_status):
playerChoice = int(playerMenu())
pcChoice = pcGenerate()
outcome = evaluateGame(playerChoice, pcChoice)
updateScoreBoard(outcome, game_status)
def playerMenu():
print("Select a choice: \n [1]: Rock \n [2]: Paper \n [3]: Scissors\n")
menuSelect = int(input("What will it be? "))
while not validateInput(menuSelect):
invalidChoice(menuSelect)
        menuSelect = int(input("Enter a correct value: "))  # cast to int so validateInput can ever succeed
return menuSelect
def validateInput(menuSelection):
if menuSelection in [1, 2, 3]:
return True
else:
return False
def pcGenerate():
pcChoice = random.randint(1,3)
return pcChoice
# Calculate ties,wins,lose
def evaluateGame(playerChoice, pcChoice):
rsp = ['rock', 'paper', 'scissors']
win_statement = ['Rock breaks scissors', 'Paper covers rock', 'Scissors cut paper']
win_status = (playerChoice - pcChoice) % 3
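    # (playerChoice - pcChoice) % 3 collapses the 3x3 outcome grid into
    # 0 = tie, 1 = player wins, 2 = computer wins, because each choice beats
    # the one exactly one step below it modulo 3.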
print(("You have chosen %s") % rsp[playerChoice - 1])
what_to_say =(("Computer has chose %s") % rsp[pcChoice - 1])
if win_status == 0:
what_to_say +=(" as Well. TIE!")
elif win_status == 1:
what_to_say +=((". %s. You WIN!") % win_statement[playerChoice - 1])
else:
what_to_say +=((". %s. You LOSE!") % win_statement[pcChoice - 1])
print(what_to_say)
return win_status
# Update track of ties, player wins, and computer wins
def updateScoreBoard(outcome, game_status):
if outcome == 0:
game_status.tie += 1
elif outcome == 1:
game_status.playerWon += 1
else:
game_status.pcWon += 1
# If user input is invalid, let them know.
def invalidChoice(menuSelect):
print(menuSelect,("is not a valid option. Please select 1-3"))
# Print the scores before terminating the program.
def displayScoreBoard(game_status):
print("")
print("Statistics:")
print(("Ties: %d") % game_status.tie)
print(("Player Wins: %d") % game_status.playerWon)
print(("Computer Wins: %d") % game_status.pcWon)
if game_status.pcWon > 0:
print(("Win/Loss Ratio: %f") % (float(game_status.playerWon) / game_status.pcWon))
else:
print("Win/Loss Ratio: Always Win.")
print(("Rounds: %d") % game_status.get_round())
def endGameSelect(game_status):
print("")
print("[1]: Play again")
print("[2]: Show Statistics")
print("[3]: Save Game")
print("[4]: Quit")
print("")
while True:
menuselect = int(input("Enter your choice: "))
if menuselect in [1, 2, 3, 4]:
break
else:
print("Wrong input.")
if menuselect == 2:
displayScoreBoard(game_status)
endGameSelect(game_status)
    elif menuselect == 3:
        # The original wrapped the save logic in a nested 'def' that was never
        # called, so saving silently did nothing; run it directly instead.
        try:
            f = open("%s.rsp" % game_status.name, 'wb')
            pickle.dump(game_status, f)
            f.close()
            print("Your game is saved successfully.")
        except IOError:
            print("error saving: make sure the file location is writable")
        endGameSelect(game_status)
elif menuselect == 4:
print("Bye!!!")
exit()
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
30bcc49e425481fed6a6df0a85ca78f5255b3b93
|
200abee8ebb5fa255e594c8d901c8c68eb9c1a9c
|
/venv/03_old/hello_world.py
|
3ef7463bc3ca43192af6add6ec132d91cd3a73f7
|
[] |
no_license
|
Vestenar/PythonProjects
|
f083cbc07df57ea7a560c6b18efed2bb0dc42efb
|
f8fdf9faff013165f8d835b0ccb807f8bef6dac4
|
refs/heads/master
| 2021-07-20T14:14:15.739074
| 2019-03-12T18:05:38
| 2019-03-12T18:05:38
| 163,770,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,760
|
py
|
'''def sum(param1, param2):
return param1 + param2 # return result to the function caller
param1 = int(input())
param2 = int(input())
c = sum(param1, param2)
print(c)
nlen=0
def checkPalindrome(inputString):
nlen = len(inputString)
if nlen == 1:
t = True
else:
for i in range(nlen//2):
if inputString[i] == inputString[-i-1]:
t = True
else:
t = False
break
return t
inputString = "sasass"
print(checkPalindrome(inputString))'''
'''def adjacentElementsProduct(inputArray):
test = inputArray[0]*inputArray[1]
for i in range((len(inputArray)-2)):
nmax = inputArray[i+1]*inputArray[i+2]
if test < nmax:
test = nmax
return test
inputArray = [6, 2, 3, 8]
max = 0
max = adjacentElementsProduct(inputArray)
print(max)'''
'''sequence = [1, 3, 2, 1]
count = 0
t = True
t1 = True
t2 = True
narray = list(sequence)
for b in range(2):
for i in range(len(narray)-1):
if narray[i] < narray[i-1]:
narray[i-1:i] = []
count += 1
if count < 2:
t1 = False
count = 0
narray2 = list(sequence)
narray = list(sequence)
for b in range(2):
for i in range(len(narray)-1):
if narray[i] < narray[i-1]:
narray[i:i+1] = []
count += 1
if count < 2:
t1 = False
t = t1 or t2
print(narray)
print(narray2)
print(t1, t2, t)'''
'''t = True
count = 0
for i in range(len(sequence)):
if count > 2:
data = False
break
if i+1 < len(sequence) and sequence[i] >= sequence[i+1]:
count += 1
if i+2 < len(sequence) and sequence[i] >= sequence[i+2]:
count += 1
print(t)
'''
'''matrix = [[1,1,1],
[2,2,2],
[3,3,3]]
price = 0
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if i != 0 and matrix[i-1][j] == 0:
matrix[i][j] = 0
for row in matrix:
for elem in row:
price += elem
print(price)'''
'''inputArray = ["aba", "aa", "ad", "vcd", "aba"]
lenw = 0
out = []
for i in range(len(inputArray)):
if lenw < len(inputArray[i]):
lenw = len(inputArray[i])
for i in range(len(inputArray)):
if len(inputArray[i]) == max(len(s) for s in inputArray):
out.append(inputArray[i])
print(out)'''
'''s1 = "aabzca"
s2 = "adcaaz"
n = 0
for i in s1:
if i in s2:
n +=1
s2 = s2.replace(i, "0", 1)
print(n)'''
'''n = str(int(123610))
mid = len(n)//2
n1 = n[:mid]
n2 = n[mid:]
sum1 = 0
for i in range(len(n1)):
sum1 +=int(n1[i])
for i in range(len(n2)):
sum1 -=int(n2[i])
if sum1 == 0:
    out = "Lucky"
else:
    out = "Ordinary"
print(out)'''
'''s = 'aaaabbcccaabb'
t = s[0]
count = 0
out = ''
for i in s:
if i == t:
count += 1
else:
out = out + t+str(count)
t = i
count = 1
out = out + t + str(count)
print(t, out)'''
'''a = [23, 54, -1, 43, 1, -1, -1, 77, -1, -1, -1, 3]
print([1, 3, -1, 23, 43, -1, -1, 54, -1, -1, -1, 77])
m = max(a)
for i in range(1, len(a)):
if a[-i] != -1:
a[-i], a[a.index(m)] = a[a.index(m)], a[-i]
m = max(a[:-i])
print(a)
'''
'''s = "The ((quick (brown) (fox) jumps over the lazy) dog)"
count = s.count('(')
op = []
cl = []
id = 0
for ch in s:
if ch == '(':
op.append(id)
id += 1
op = op[::-1]
id = 0
'speed up the bracket search by choosing where the search starts'
for i in range(count):
for ch in s:
if ch == ')' and id > op[i] and id not in cl:
cl.append(id)
break
id += 1
id = 0
for i in range(count):
sh = s[op[i]+1:cl[i]]
s = s.replace(sh, sh[::-1])
s = s.replace("(", "")
s = s.replace(")", "")
print(s)'''
'''s = "The ((quick (brown) (fox) jumps over the lazy) dog)"
while ')' in s:
j = s.index(')')
i = s.rindex('(', 0, j)
s = s[:i] + s[j-1:i:-1] + s[j+1:]
print(s)'''
'''a = [50]
b = [0,0]
for i in range(len(a)):
b[i%2] += a[i]
print(b)'''
'''
a = ["*****",
"*abc*",
"*ded*",
"*****"]
picture = ["abc", "ded"]
picture.insert(0,"*" * len(picture[0]))
picture.append("*" * len(picture[0]))
for i in range(len(picture)):
test = picture[i]
test = "*" + test + "*"
picture[i] = test
print(picture)'''
'''def areSimilar(a, b):
idx = []
if len(a) != len(b):
return False
for i in range(len(a)):
if a[i] != b[i]:
idx.append(i)
if len(idx) == 0:
return True
if len(idx) != 2:
return False
if a[idx[0]] == b[idx[1]] and a[idx[1]] == b[idx[0]]:
return True
else:
return False
'collect in idx only the positions where the source arrays differ; if there are exactly two such pairs, check that the pairs are swaps of each other'
a = [1, 2, 2]
b = [2, 1, 1]
print(areSimilar(a, b))'''
'''def arrayChange(inputArray):
n = 0
for i in range(1, len(inputArray)):
if inputArray[i] <= inputArray[i-1]:
n += inputArray[i - 1] - inputArray[i] + 1
inputArray[i] += inputArray[i-1] - inputArray[i] +1
return n
inputArray = [2, 3, 3, 5, 5, 5, 4, 12, 12, 10, 15]
print(arrayChange(inputArray))
'''
'''a = [int(i) for i in input().split()]
b = []
ans = ''
for i in range(len(a)):
if a.count(a[i]) > 1 and (a[i] not in b):
b.append(a[i])
for i in b:
ans += str(i) + ' '
print(ans)
'''
'''
check whether a string can be rearranged into a palindrome:
only the characters that occur an odd number of times are counted and collected
def palindromeRearranging(inputString):
a = []
for i in range(len(inputString)):
if inputString.count(inputString[i]) % 2 != 0:
if inputString[i] != inputString[i-1]:
a.append(inputString.count(inputString[i]))
return len(a) <= 1
task = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaccc'
print(palindromeRearranging(task))
'''
'''MINESWEEPER (codesignal)
def minesweeper(matrix):
row, col = len(matrix), len(matrix[0])
ans = [[0 for c in range(col)] for r in range(row)]
for i in range(row):
for j in range(col):
if matrix[i][j]:
ans[i][j] = -1
for di in range(-1, 2):
for dj in range(-1, 2):
ai = i + di
aj = j + dj
if 0 <= ai < row and 0 <= aj < col and matrix[ai][aj]:
ans[i][j] += 1
return ans
task = [[True,False,False],[False,True,False],[False,False,False]]
print(minesweeper(task))
'''
'''
def avoidObstacles(inputArray):
jump = 1
a = 0
while a < max(inputArray)//jump:
jump += 1
for i in range(1, max(inputArray)//jump+1):
if jump*i not in inputArray:
a += 1
else:
a = 0
break
return jump
task = [5, 3, 6, 7, 9]
print(avoidObstacles(task))
'''
''' # box-blur effect for a "photo"
def boxBlur(image):
row, col = len(image), len(image[0]) # row rows, col columns
ans = []
for i in range(1, row-1):
ans.append([])
for j in range(1, col-1):
flsum = 0
for k in range(-1, 2):
for l in range(-1, 2):
flsum += image[i+k][j+l]
ans[i-1].append(int(flsum/9))
return ans
task = [[7, 4, 0, 1], [5, 6, 2, 2], [6, 10, 7, 8], [1, 4, 2, 0]]
print(boxBlur(task))
'''
'''codesignal: check whether a variable name is valid
def variableName(name):
if not name[0].isalpha() and name[0] != '_':
return False
else:
for i in range(1, len(name)):
if not name[i].isalnum() and name[i] != '_':
return False
return True
name = 'var1_'
print(variableName(name))
'''
'''codesignal
def absoluteValuesSumMinimization(a):
# x = a[0]
list = {}
for i in range(len(a)):
sabs = 0
for j in range(len(a)):
sabs += abs(a[j] - a[-(i+1)])
list[sabs] = a[-(i+1)]
print(list)
return list[min(list)]
test = [1, 1, 3, 4]
print(absoluteValuesSumMinimization(test))
'''
''' brute-force over all permutations
def stringsRearrangement(inputArray):
import itertools
    perm = list(itertools.permutations(inputArray, len(inputArray))) # full list of all permutations
    for k in perm: # check every permutation variant
for i in range(1, len(k)):
a = k[i]
b = k[i-1]
count = 0
for index in range(len(a)):
if a[index] != b[index]:
count += 1
if count != 1:
break
if count == 1:
return True
return False'''
'''#codesignal
#Given array of integers, find the maximal possible sum of some of its k consecutive elements.
def arrayMaxConsecutiveSum(a, k):
    c = m = sum(a[:k]) # sum of the initial window
    for i in range(len(a) - k):
        c = c + a[i + k] - a[i] # slide the window: drop the leftmost element, add the next
        m = max(c, m) # keep the running maximum in m
return m
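# worked example: for a = [1, 3, 2, 4] and k = 3 the two windows sum to
# 6 and 9, so the call below prints 9.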
test = [1, 3, 2, 4]
k = 3
print(arrayMaxConsecutiveSum(test, k))'''
|
[
"vestenar@gmail.com"
] |
vestenar@gmail.com
|
2bd8d6cbcaa1f087d3413725ed8af20316077c61
|
a20db420b58321756676ddf41a2833f0283c6f66
|
/src/Chrysalis.py
|
9b2b01b931d3063fbaa23bb87d37f3ec12972c9c
|
[] |
no_license
|
CPSibo/Chrysalis
|
5a3194cfb0be8c24543ffb51dd52643afea9c2b1
|
a2cfaaf4aeb4ad7adb48f1229ba291a9af6dc263
|
refs/heads/master
| 2020-04-15T07:54:00.249188
| 2019-10-01T01:12:06
| 2019-10-01T01:12:06
| 164,506,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,298
|
py
|
# region Imports
import os
import subprocess
import pathlib
import sys
import argparse
import json
from dotenv import load_dotenv
from Utilities.Arguments import args
from Utilities.Logger import Logger
from Subscription import Subscription
from Repositories import REPOSITORIES
from Destinations import DESTINATIONS
# endregion
class Chrysalis:
"""
The entry point for Chrysalis.
Attributes:
subscriptions (dict): Decoded subscription settings.
"""
# region Attributes
subscriptions = []
# endregion
# region Constructors
def __init__(self):
load_dotenv()
self.load_subscriptions()
# endregion
# region Functions
def load_subscriptions(self):
"""
Reads in subscriptions.json and decodes all the settings
into Subscription objects.
"""
with open('src/subscriptions.json', 'r') as myfile:
subscription_encoded=myfile.read()
subscriptions_decoded = json.loads(subscription_encoded)
self.subscriptions = []
for sub in subscriptions_decoded:
self.subscriptions.append(Subscription(dict_config = sub))
def process_subscription(self, subscription: Subscription):
"""
Runs youtube-dl and the post-processing for the given subscription.
Parameters:
subscription (Subscription): The subscription to process.
"""
if not subscription.enabled:
return
Logger.log(r'Chrysalis', r'Processing "{}"...'.format(subscription.name))
self.setup_staging_directory(subscription)
if subscription.logging and subscription.logging.path:
pathlib.Path(subscription.logging.path).parent.mkdir(parents=True, exist_ok=True)
command = self.construct_command(subscription)
subprocess.run(command, shell=True)
def setup_staging_directory(self, subscription: Subscription) -> str:
"""
Constructs and creates the staging directory for the given subscription.
Parameters:
subscription (Subscription): The subscription to process.
Returns:
str: The path to the staging directory.
"""
pathlib.Path(subscription.staging_directory).mkdir(parents=True, exist_ok=True)
return subscription.staging_directory
def construct_command(self, subscription: Subscription) -> str:
"""
Builds the youtube-dl command for the given subscription.
Args:
subscription (Subscription): The subscription to process.
Returns:
str: The youtube-dl command with all desired arguments.
"""
command = r'youtube-dl'
# Add the youtube-dl config path.
if subscription.youtubedl_config.config:
config_path = os.path.join(os.getenv('youtubedl_config_directory'), subscription.youtubedl_config.config)
command += r' --config-location "{}"'.format(config_path)
# Add the metadata-from-title pattern.
if subscription.youtubedl_config.metadata_format:
command += r' --metadata-from-title "{}"'.format(subscription.youtubedl_config.metadata_format)
# Add the output pattern.
if subscription.youtubedl_config.output_format:
output_format = subscription.staging_directory + '/staging_area/' + subscription.youtubedl_config.output_format
command += r' -o "{}"'.format(output_format)
# Add the path to the video ID archive.
if subscription.youtubedl_config.archive:
archive_path = os.path.join(subscription.staging_directory, subscription.youtubedl_config.archive)
command += r' --download-archive "{}"'.format(archive_path)
# Add any extra arguments this sub has.
if subscription.youtubedl_config.extra_commands:
command += " " + subscription.youtubedl_config.extra_commands
# Add the subscription URL.
command += r' "{}"'.format(subscription.url)
# Construct the post-processing call back into
# Chrysalis to be run after each successful download.
if subscription.post_processing:
command += ' --exec \'"{}" "{}" --postprocess {{}} --subscription "{}"\''.format(
sys.executable,
__file__,
subscription.name
)
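            # Note: the doubled braces '{{}}' above leave a literal '{}' in the
            # final command string, which youtube-dl's --exec option replaces
            # with the path of each downloaded file.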
# Construct the stdout redirect to the log file.
if subscription.logging.path:
command += r' {} "{}"'.format(
'>>' if subscription.logging.append == True else '>',
subscription.logging.path
)
Logger.log(r'Chrysalis', r'Command to be run: [{}]'.format(command))
return command
    def postprocess(self, file: str, subscription: Subscription) -> None:
"""
Runs the post-processing for the given youtube-dl output file.
Args:
file (str): Absolute path to the youtube-dl output file.
subscription (Subscription): The settings to process the file under.
"""
from PostProcessor import PostProcessor
        Logger.log(r'Chrysalis', r'Starting PostProcessor for {}'.format(file), 1)
postprocessor = PostProcessor(
file = file,
settings = subscription
)
postprocessor.run()
Logger.tabs -= 1
def run(self) -> int:
"""
Entry point for the Chrysalis process.
Returns:
int: Status code.
"""
if args.postprocess is not None:
subs = [item for item in self.subscriptions if item.name == args.subscription]
subscription = subs[0] if subs else None
if not subscription:
return -1
self.postprocess(args.postprocess, subscription)
else:
for subscription in self.subscriptions:
self.process_subscription(subscription)
# endregion
Chrysalis().run()
|
[
"cpsibo@gmail.com"
] |
cpsibo@gmail.com
|
3cd5a1b7ce865bf7a94113f781c663ed6ae8ebe9
|
21ff624abb58c2af27d209f7b1d1e167244b7536
|
/adminLab/adminLab/settings.py
|
cb4f9842ab89239aec4bb420614f742c7ea339ef
|
[] |
no_license
|
SachekDenisHomePc/DjangoLab2-3
|
95c858bcdcbd6458a5eedd6805245d4217e93e7d
|
e062898f91fbabb98605a4207953c3786e4751bf
|
refs/heads/master
| 2021-05-24T12:18:53.901085
| 2020-04-06T16:41:56
| 2020-04-06T16:41:56
| 253,558,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,314
|
py
|
"""
Django settings for adminLab project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't1&t_zu8r+y)1)^b%)w*%ypu^i#@1%(7(fa9n51_62qkktjocg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lab1DB',
'Lab2',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'adminLab.urls'
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adminLab.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'Lab2Db': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'lab2Db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"sachek.denis@gmail.com"
] |
sachek.denis@gmail.com
|
39d7269798832e93cc7391c6516b8df87b50ca36
|
59c0669a38c4178f2f5cf8f9dca7553849c286a2
|
/MyPro/pythonScript/QRCodeDetect/Invoice/hough_tansform_bad.py
|
437f292bb460649c54b3fb981f99722309b81288
|
[] |
no_license
|
AUGUSTRUSH8/ImageProcess
|
f33ceaabaac67436df47fd1e1f115a8f44a6f556
|
46fc85b61dab52c3876dfacb4dfd22c962dc13bf
|
refs/heads/master
| 2023-04-27T21:39:36.044320
| 2022-07-04T14:59:35
| 2022-07-04T14:59:35
| 174,789,186
| 31
| 17
| null | 2022-07-06T20:07:14
| 2019-03-10T07:01:13
|
Java
|
UTF-8
|
Python
| false
| false
| 4,007
|
py
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
def rotate_about_center2(src, radian, scale=1.):
    # argument: angle in radians
w = src.shape[1]
h = src.shape[0]
angle = radian * 180 / np.pi
# now calculate new image width and height
nw = (abs(np.sin(radian)*h) + abs(np.cos(radian)*w))*scale
nh = (abs(np.cos(radian)*h) + abs(np.sin(radian)*w))*scale
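    # nw/nh are the dimensions of the axis-aligned bounding box of the rotated
    # image (|sin|*h + |cos|*w by |cos|*h + |sin|*w), so no corner is clipped
    # by the warp below.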
# ask OpenCV for the rotation matrix
rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
# calculate the move from the old center to the new center combined
# with the rotation
rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0]))
# the move only affects the translation, so update the translation
# part of the transform
rot_mat[0,2] += rot_move[0]
rot_mat[1,2] += rot_move[1]
return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)
def get_group(arr):
    # group into 4 radian intervals and return the non-empty groups
radian_45 = np.pi/4
radian_90 = np.pi/2
radian_135 = radian_45 * 3
radian_180 = np.pi
ret_arr = [[],[],[],[]]
for i in range(len(arr)):
if arr[i] < radian_45:
ret_arr[0].append(arr[i])
elif arr[i] < radian_90:
ret_arr[1].append(arr[i])
elif arr[i] < radian_135:
ret_arr[2].append(arr[i])
else:
ret_arr[3].append(arr[i])
while [] in ret_arr:
ret_arr.remove([])
#print ret_arr
return ret_arr
def get_min_var_avg(arr):
    # group by radian interval and return the mean radian of the group with the smallest variance
group_arr = get_group(arr)
print(group_arr)
cv2.waitKey(0)
var_arr = []
if len(group_arr) <= 1:
var_arr.append(np.var(group_arr[0]))
print(var_arr)
cv2.waitKey(0)
else:
for i in range(len(group_arr)):
var_arr.append(np.var(group_arr[i]))
print(var_arr)
min_var = 10000
min_i = 0
for i in range(len(var_arr)):
if var_arr[i] < min_var:
min_var = var_arr[i]
min_i = i
#print min_var, i
avg = np.mean(group_arr[min_i])
return avg
def get_rotate_radian(radian, reverse = False):
    # convert to the final rotation radian
radian_45 = np.pi/4
radian_90 = np.pi/2
radian_135 = radian_45 * 3
radian_180 = np.pi
ret_radian = 0
if radian < radian_45:
ret_radian = radian
elif radian < radian_90:
ret_radian = radian - radian_90
elif radian < radian_135:
ret_radian = radian - radian_90
else:
ret_radian = radian - radian_180
if reverse:
ret_radian += radian_90
print(ret_radian)
return ret_radian
def rotate():
image = cv2.imread("test3.jpg", 0)
print(image.shape)
    # Gaussian blur
    blur = cv2.GaussianBlur(image,(7,7),0) # tune by hand, empirical values
    cv2.imshow('image',blur)
    cv2.waitKey(0)
    # Canny edge detection
    canny = cv2.Canny(blur, 20, 150, 3)
    cv2.imshow("canny",canny)
    lines = cv2.HoughLines(canny, 1, np.pi/180, 200) # tune by hand, empirical values
    # compute the average radian
l = len(lines[0])
print(l)
theta_arr = [lines[0][i][1] for i in range(l)]
print(theta_arr)
cv2.waitKey(0)
rotate_theta = get_min_var_avg(theta_arr)
print(rotate_theta)
#print lines
'''for line in lines[0]:
rho = line[0]
theta = line[1]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
cv2.line(image, (int(x0 - 1000*b), int(y0 + 1000*a)), (int(x0 + 1000*b), int(y0 - 1000*a)), (0,255,0), 2)
#cv2.imshow('image',image)
#cv2.waitKey(0)'''
    img2 = rotate_about_center2(image, get_rotate_radian(rotate_theta, image.shape[0] > image.shape[1])) # height > width
plt.imshow(img2)
plt.show()
if __name__ == '__main__':
rotate()
|
[
"l"
] |
l
|
bc16f890677503af70743dd56dce78a66b1d4d0b
|
726c443d00ca8b43cc2a7559c2ae21fbad3bda10
|
/order/migrations/0008_auto_20191208_1242.py
|
a5bd9f20b96975e7f9a87f15c41c817b9ec1ba97
|
[] |
no_license
|
djleeyuanfang/onlineshop
|
3af8ef9e213ccc3a18f5f61ab20e8c1bfbfdf5b0
|
f3d15614f4104475a98b3d387aee6d2121639c12
|
refs/heads/master
| 2020-11-24T13:37:46.709584
| 2020-08-29T11:36:38
| 2020-08-29T11:36:38
| 228,171,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
# Generated by Django 3.0 on 2019-12-08 12:42
from django.db import migrations, models
import django.db.models.deletion
import order.models
class Migration(migrations.Migration):
dependencies = [
('order', '0007_auto_20191207_1950'),
]
operations = [
migrations.CreateModel(
name='ImageDir',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('direction', models.CharField(max_length=50, unique=True, verbose_name='category directory')),
],
),
migrations.RemoveField(
model_name='goodimage',
name='img',
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_name', models.CharField(max_length=32, verbose_name='uploaded file name')),
('img', models.ImageField(upload_to=order.models.dre_path)),
                ('ImageDir', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='order.ImageDir', verbose_name='directory')),
],
),
migrations.AddField(
model_name='goodimage',
name='image',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='order.Image', verbose_name='image'),
),
]
|
[
"1025939769@qq.com"
] |
1025939769@qq.com
|
e396119de92c2a9d0442f560d6abcdd894436e17
|
484f111548e9d7192a5748eb202c08802484d747
|
/fw/flash.py
|
8361fc57a27f60367e21952493f6068dcb8a037a
|
[
"Apache-2.0"
] |
permissive
|
cmcmurrough/moteus
|
dafb2e5224409aaf1d57b66f58965d298845678d
|
6780967ec40ad7f1ab76cdbd7021f2d07b739efe
|
refs/heads/main
| 2023-07-11T10:29:58.645291
| 2021-08-13T13:38:32
| 2021-08-13T13:38:32
| 396,627,837
| 2
| 0
|
Apache-2.0
| 2021-08-16T05:07:08
| 2021-08-16T05:07:07
| null |
UTF-8
|
Python
| false
| false
| 2,193
|
py
|
#!/usr/bin/python3
# Copyright 2021 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import subprocess
import sys
import tempfile
BINPREFIX = '' if platform.machine().startswith('arm') else 'arm-none-eabi-'
OBJCOPY = BINPREFIX + 'objcopy'
OPENOCD = 'openocd -f interface/stlink.cfg -f target/stm32g4x.cfg '
def main():
tmpdir = tempfile.TemporaryDirectory()
moteus_elffile = (
sys.argv[1]
if len(sys.argv) > 1 else
'bazel-out/stm32g4-opt/bin/fw/moteus.elf')
bootloader_elffile = (
sys.argv[2]
if len(sys.argv) > 2 else
'bazel-out/stm32g4-opt/bin/fw/can_bootloader.elf')
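    # Three images are extracted below and flashed at the addresses used in
    # the openocd call at the end: the vector table at 0x08000000, the CAN
    # bootloader at 0x0800c000, and the main firmware at 0x08010000.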
subprocess.check_call(
f'{OBJCOPY} -Obinary ' +
f'-j .isr_vector ' +
f'{moteus_elffile} {tmpdir.name}/out.08000000.bin',
shell=True)
subprocess.check_call(
f'{OBJCOPY} -Obinary ' +
f'-j .text -j .ARM.extab -j .ARM.exidx -j .data -j .bss ' +
f'{bootloader_elffile} {tmpdir.name}/out.0800c000.bin',
shell=True)
subprocess.check_call(
f'{OBJCOPY} -Obinary ' +
f'-j .text -j .ARM.extab -j .ARM.exidx -j .data -j .ccmram -j .bss ' +
f'{moteus_elffile} {tmpdir.name}/out.08010000.bin',
shell=True)
subprocess.check_call(
f'{OPENOCD} -c "init" ' +
f'-c "reset_config none separate; ' +
f' program {tmpdir.name}/out.08000000.bin verify 0x8000000; ' +
f' program {tmpdir.name}/out.0800c000.bin verify 0x800c000; ' +
f' program {tmpdir.name}/out.08010000.bin verify ' +
f' reset exit 0x08010000"',
shell=True)
if __name__ == '__main__':
main()
|
[
"jjp@pobox.com"
] |
jjp@pobox.com
|
75d4242afd465b53edbc623e479a98134dddabf9
|
1735d7a35e9a3bc9b423d6960310b4bb80ca6b42
|
/py3bc/17_functions_03.py
|
deac79e8d38581446c4dc3a10ce91f3d7ee10321
|
[] |
no_license
|
qwertyzhed/python3bootcamp
|
38c0c1f2d354094f90db0fb54c9955c1befe4a56
|
0257ab47155115bf9994e6da77f7daab89d64fc3
|
refs/heads/master
| 2020-03-20T18:39:40.989958
| 2018-06-19T15:54:30
| 2018-06-19T15:54:30
| 137,598,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
def yell(word):
    # print the upper-cased word with an exclamation mark; nothing useful to return
    print(word.upper() + '!')
yell('fk you')
|
[
"alex@gmail.com"
] |
alex@gmail.com
|
2cd28a19444e56e138b55bd4f48633e1c6bb8a0f
|
085551650c697038bdfaebe4778e9741d3f1431a
|
/dashboard/urls.py
|
a596cb731368ad6c8cb3e4c609ca4d104d86ba18
|
[] |
no_license
|
AlonsoCN/chat-school-project
|
8029f011645c7043c27fd6583532e6dbc4ad063f
|
946b19352406804fd363582be56cd58dc426d149
|
refs/heads/master
| 2020-04-05T23:15:44.616122
| 2016-06-07T18:46:18
| 2016-06-07T18:46:18
| 60,637,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
from django.conf.urls import patterns, url
urlpatterns = patterns('dashboard.views',
url(r'^$', 'dashboard_view', name='dashboard'),
)
|
[
"luis.alonso.cn@gmail.com"
] |
luis.alonso.cn@gmail.com
|
4080d41a60b85ff5500efacfc8fa63c51b33899f
|
2d1ffb862ec65116f88b0986e4f36d36110cbfe5
|
/app/views.py
|
ced21fb3eae0537fbf78312e2c9f3eb801e59a90
|
[] |
no_license
|
stkc282/wedding
|
c38afc7861119b8cf4490fa35007841d58e161c7
|
1799b72820787a59d0d5b7edf7748b1ab7af9a98
|
refs/heads/master
| 2021-06-18T04:15:20.293547
| 2019-08-19T10:17:13
| 2019-08-19T10:17:13
| 202,826,952
| 0
| 0
| null | 2021-06-10T21:52:12
| 2019-08-17T02:48:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
# from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django_filters.views import FilterView
from .filters import ItemFilter
from .forms import ItemForm
from .models import Item
from django.shortcuts import render
# # Create your views here.
# # Search list view
class ItemFilterView(FilterView):
model = Item
#
# # default ordering: newest first
# queryset = Item.objects.all().order_by('-created_at')
#
# # settings for django-filter
# filterset_class = ItemFilter
# strict = False
#
# # number of items per page
# paginate_by = 10
#
# # store the search conditions in the session
# def get(self, request, **kwargs):
# if request.GET:
# request.session['query'] = request.GET
# else:
# request.GET = request.GET.copy()
# if 'query' in request.session.keys():
# for key in request.session['query'].keys():
# request.GET[key] = request.session['query'][key]
#
# return super().get(request, **kwargs)
# # Search list view (login-required variant)
# class ItemFilterView(LoginRequiredMixin, FilterView):
# model = Item
#
# # default ordering: newest first
# queryset = Item.objects.all().order_by('-created_at')
#
# # settings for django-filter
# filterset_class = ItemFilter
# strict = False
#
# # number of items per page
# paginate_by = 10
#
# # store the search conditions in the session
# def get(self, request, **kwargs):
# if request.GET:
# request.session['query'] = request.GET
# else:
# request.GET = request.GET.copy()
# if 'query' in request.session.keys():
# for key in request.session['query'].keys():
# request.GET[key] = request.session['query'][key]
#
# return super().get(request, **kwargs)
#
# Detail view
class ItemDetailView(DetailView):
model = Item
# # Detail view
# class ItemDetailView(LoginRequiredMixin, DetailView):
# model = Item
# Create view
class ItemCreateView(CreateView):
model = Item
form_class = ItemForm
success_url = reverse_lazy('thanks')
# Update view
class ItemUpdateView(UpdateView):
model = Item
form_class = ItemForm
success_url = reverse_lazy('index')
# Delete view
class ItemDeleteView(DeleteView):
model = Item
success_url = reverse_lazy('index')
def invitation(request):
# post = get_object_or_404(Post, pk=pk )
return render(request, 'app/invitation.html', {})
def thanks(request):
return render(request, 'app/thanks.html', {})
def access(request):
return render(request, 'app/access.html', {})
# def create(request):
# if request.method == 'POST':
# form_class = ItemForm(request.POST)
# if form_class.is_valid():
# model = form_class.save(commit=False)
# model.save()
# return redirect('index', pk=form_class.pk)
# else:
# form_class = ItemForm
# return render(request, 'app/thanks.html', {'form': form_class})
|
[
"you@example.com"
] |
you@example.com
|
0660e025dd554b5703ef032f89a0902e86d1a771
|
7dad4550554888a865334df8023970378e17ae56
|
/Longitud.py
|
e1fbc88390d8150834b146b0e08cdf5e05066f06
|
[] |
no_license
|
JRLV14/Pensamiento_Computacional
|
8ba79875bfed8d67e76e3d24847c55d61f47e149
|
e2af455f1f7ae8922c414617c3b75ada40c7bc4f
|
refs/heads/master
| 2023-06-09T22:52:54.986837
| 2021-07-02T17:43:09
| 2021-07-02T17:43:09
| 381,734,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
def run():
    Nombre = input("Write your name: ")
    Nombre = Nombre.replace(" ", "")
    letras = len(Nombre)  # len() already returns an int
    print("Hello " + Nombre + ", your name has " + str(letras) + " letters")
if __name__ == '__main__':
run()
|
[
"jrlv4070@gmail.com"
] |
jrlv4070@gmail.com
|
b536af523c5e69ea18ffeaf04df2fc80b986dd1f
|
a1aea2567fd4176fdcdf66250e933b32d1d3db27
|
/vdW_force.py
|
1587ff12264830cc0ea3cc4fa7f4c66e98e01689
|
[] |
no_license
|
toftul/fiber_binding_advanced
|
c4df1a7b60d392745ac3eb8e4424659750ccafa4
|
5fc737de0ce6f4d4253932044f6f1ef8b4e11e0d
|
refs/heads/master
| 2021-05-15T15:02:11.976213
| 2018-05-25T11:39:00
| 2018-05-25T11:39:00
| 107,274,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 12 12:06:10 2018
@author: ivan
"""
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
def integrand_drho(H, A, y, phi):
def f(rho):
C1 = H + A + y + rho * np.cos(phi)
return(rho * C1 / (C1 * C1 + rho*rho*np.sin(phi)*np.sin(phi))**3.5)
return(quad(f, 0, A)[0])
def integrand_dphi(H, A, y):
f = lambda phi: integrand_drho(H, A, y, phi)
return(quad(f, 0, np.pi)[0])
def FvdW(H, A, a, A123):
int_y = lambda y: (2 - y) * y * integrand_dphi(H, A, y)
f = -15/4 * quad(int_y, 0, 2)[0]
return(A123/a * f)
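# In FvdW above, H = gap/a and A = R/a are the surface gap and cylinder radius
# non-dimensionalized by the particle radius a, and the triple integral is
# scaled by A123/a (Hamaker constant over particle radius).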
# %%
rp = 120e-9 # [m] particle radius
rfMM = 495e-9 # [m] fiber radius
rfSM = 130e-9 # [m] fiber radius
# A = R / a
ASM = rfSM / rp
AMM = rfMM / rp
A123 = 10e-20 # [J] approx value for the Hamaker constant
# gap between particle and fiber surface
gapSpace = np.logspace(np.log10(rp/50), np.log10(5*rp), 30) # [m]
FSpaceSM = np.zeros(len(gapSpace))
FSpaceMM = np.zeros(len(gapSpace))
for i, gap in enumerate(gapSpace):
print(i/len(gapSpace))
FSpaceSM[i] = FvdW(gap/rp, ASM, rp, A123)
FSpaceMM[i] = FvdW(gap/rp, AMM, rp, A123)
# %%
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(8,5))
#plt.title('Attractive vdW force')
plt.semilogy(gapSpace/rp, np.abs(FSpaceSM)*1e12, 'k-', label='SM')
plt.semilogy(gapSpace/rp, np.abs(FSpaceMM)*1e12, 'k--', label='MM')
plt.xlabel('Relative gap width, $g/R_p$')
plt.ylabel('vdW attractive force $|F^{wdW}_r|$, pN')
#plt.xlim((1e-2, 1e1))
#plt.xlim((np.min(gapSpace/rp), np.max(gapSpace/rp)))
plt.ylim((0.1*np.min(np.abs(FSpaceSM)*1e12), 10*np.max(np.abs(FSpaceSM)*1e12)))
plt.legend()
#plt.grid(True,which="both",ls="-", alpha=0.4)
#plt.savefig('results/vdw.pdf')
plt.show()
# %%
### sanity check
# Compare with
# 'The van der Waals Interaction between a Spherical Particle and a Cylinder'
#A = 1
#
#Hspace = np.logspace(-2, 2, num=10)
#FSpace = np.zeros(len(Hspace))
#for i, H in enumerate(Hspace):
# print(i)
# FSpace[i] = FvdW(H, A, 1, 1)
# %%
# plot
#plt.figure(figsize=(8,5))
#plt.rcParams.update({'font.size': 16})
#plt.loglog(Hspace, -FSpace, '-k')
#plt.xlabel('D, nm')
#plt.ylabel('$F^{wdW}_r$, pN')
#plt.grid()
#plt.show()
# STATUS: CHECKED!
|
[
"toftul.ivan@gmail.com"
] |
toftul.ivan@gmail.com
|
bb6e52fee441903389167e2b4292125b69cdb8b8
|
ce3964c7195de67e07818b08a43286f7ec9fec3e
|
/dl_poly/get_pvt.py
|
6fd5f7613ff6286470a47abe111c368b60d57ff7
|
[] |
no_license
|
zhuligs/physics
|
82b601c856f12817c0cfedb17394b7b6ce6b843c
|
7cbac1be7904612fd65b66b34edef453aac77973
|
refs/heads/master
| 2021-05-28T07:39:19.822692
| 2013-06-05T04:53:08
| 2013-06-05T04:53:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
#!/usr/bin/env python
# Try retrieving P,V,T, etc. from the STATIS file, may be easier than from OUTPUT...
import sys  # os and commands were imported but never used
def readlines(FILE,n):
'''Read n lines from FILE'''
for i in range(n):
FILE.readline()
try:
s = open('STATIS','r')
header1 = s.readline()
header2 = s.readline()
c = open('CONTROL','r')
lines = c.readlines()
for line in lines:
if len(line.split()) == 2:
var, value = line.split()
if var == 'steps':
steps = int(value)
elif var == 'stats':
stats = int(value)
c.close()
except:
print 'Could not open STATIS and CONTROL files successfully--stopping'
sys.exit(0)
# Total energy is row 1 value 1
# Temp is row 1, value 2
# Pres is row 6, value 2
# Vol is row 4, value 4
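# Each STATIS block is a header line (timestep, time, element count) followed
# by the elements printed five per line; the loop below reads six of those
# lines (30 values) explicitly, then skips the leftover ceil((elements-30)/5)
# lines to reach the next block.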
nblocks = int(steps)/int(stats)
out = open('pvt.dat','w')
out.write('# --Data extracted from STATIS file--\n')
out.write('#tstep\tpres (GPa)\tvol (ang^3)\ttemp (K)\tetot (eV)\t\tpot (eV)\n')
for i in range(nblocks):
tstep, t, elements = s.readline().split()
row1 = s.readline().split()
Etot = str( float(row1[0]) * 1.036426865E-4 ) # convert unit to eV
T = row1[1]
s.readline()
s.readline()
V = s.readline().split()[3]
s.readline()
P = str( float(s.readline().split()[1]) * 0.016605402 ) # convert atm unit to GPa
# Every line has 5 values, each line read is 5 elements gone
leftover = int(elements) - 5*6
if leftover % 5 == 0:
extra_lines = leftover/5
else:
extra_lines = leftover/5 + 1
readlines(s,extra_lines)
# Calculate Etot - 3*k_b*T
k_b = 8.617343E-5 # Boltzmann's const in eV/K
pot = str( float(Etot) - 3*k_b*float(T) )
out.write(tstep+'\t'+P+' \t'+V+'\t'+T+'\t'+Etot+'\t'+pot+'\n')
s.close()
out.close()
|
[
"boates@gmail.com"
] |
boates@gmail.com
|
575ec3fde6902f2199dd97ec7bcc0c58ef03bab3
|
ec0b547830e10e11de13f6d5e375f2ee746c64ff
|
/pos_retail/wizards/__init__.py
|
8dfb1563dff3e3ae399cce0e45dc71a6969c3c6c
|
[] |
no_license
|
babarlhr/third-party-apps
|
cc4a83df48e00d35c3fd7bbd06ed9ef738f1ba99
|
f882e65b9873a937aa7f62171bcefb8b4982366b
|
refs/heads/13.0
| 2023-04-02T14:32:37.980953
| 2021-04-10T19:58:07
| 2021-04-10T19:58:07
| 584,900,514
| 1
| 0
| null | 2023-01-03T20:06:22
| 2023-01-03T20:06:22
| null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from . import StockImmediateTransfer
from . import RemovePosOrder
from . import PosRemoteSession
from . import PosMakePayment
from . import CashBoxOut
|
[
"lucaslsoto95@gmail.com"
] |
lucaslsoto95@gmail.com
|
1c10e66617f17ae42de12803b880e514daca1829
|
e4e44097320d056f3768eb3a53f28f4c19cdc7ce
|
/findSubstring.py
|
cb2251aa8f1dbb867af20c7a065e334f694cbfda
|
[] |
no_license
|
amisyy/leetcode
|
0640e009c02956778f402eb89b74c98c36882d44
|
ba8ab343a246aa3eead75a23dc69b5a76680d290
|
refs/heads/master
| 2021-06-03T06:27:38.216035
| 2020-11-08T06:59:40
| 2020-11-08T06:59:40
| 103,757,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
class Solution(object):
def findSubstring(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: List[int]
"""
word_length = len(words[0])
word_num = len(words)
length = len(s)
words_dict = {}
for i in words:
if i not in words_dict:
words_dict[i] = 1
else:
words_dict[i]+=1
res = []
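        # For each candidate start index, walk word_num consecutive chunks of
        # word_length characters and tally them against the words multiset;
        # the index matches only if all chunks are consumed (j == word_num).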
for i in range(length -word_length*word_num +1 ):
dict_curr={}
j=0
while j < word_num:
word = s[i+j*word_length:i+(j+1)*word_length]
if word not in words_dict:
break
elif word not in dict_curr:
dict_curr[word] = 1
else:
dict_curr[word] +=1
if dict_curr[word] > words_dict[word]:
break
j +=1
if j==word_num:
res.append(i)
return res
u = Solution()
print(u.findSubstring("barfoothefoobarman",["foo","bar"]))
|
[
"amisyy@sina.cn"
] |
amisyy@sina.cn
|
df268ec9f4a7b257c6ca9892b85d4be9155b5b5c
|
27824a7e18764de82ad0a80025d1390094abfe65
|
/introduction/start.py
|
63a1e1009de6a0feceda5b9d04e8a08b2cf91b94
|
[] |
no_license
|
oliviergimenez/python-training
|
aec501740130edc39b78dd87976384732e5fb7df
|
3d0f8f19d1d3dff9d372950dbb95a5f043d60f97
|
refs/heads/master
| 2023-01-28T21:06:46.182978
| 2020-12-08T07:40:34
| 2020-12-08T07:40:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,081
|
py
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started
#
# ## Python Install
#
# ### Anaconda
#
# Is is strongly advised to install Python by using [Anaconda](https://www.anaconda.com/):
#
# - Ready to go Python, with the main libraries installed (Numpy, Scipy, Matplotlib)
# - Possibility to create multiple environments with different versions of Python and packages ([conda](https://conda.io/en/latest/)).
#
# In practice:
#
# - Download the distribution corresponding to your system (cf. [Download](https://www.anaconda.com/distribution/#download-section))
# - Install it in a place where you have read and write access.
#
#
#
#
# ## Running Python
#
# ### Python console
#
# To run Python in normal mode, type in a terminal:
#
# ```
# python
# ```
#
# <img src="figs/console.png" width="70%">
#
# ### Interactive Python console
#
# To run Python in interactive mode, type in a terminal:
#
# ```
# ipython
# ```
#
# <img src="figs/ipython.png" width="70%">
#
# ### Spyder (IDE)
#
# To run the Python IDE, type in a terminal:
#
# ```
# spyder &
# ```
#
# <img src="figs/spyder.png" width="70%">
#
#
# ### Jupyter Notebook
#
# To run the Jupyter Notebook, type in a terminal:
#
# ```
# jupyter notebook &
# ```
#
# <img src="figs/notebook.png" width="70%">
# ## Running scripts
#
# Open a text editor and type in:
#
# ```
# import sys
#
# # my first program (comment)
# print('hello ', sys.argv)
# ```
#
# Save as ```hello.py```
#
# ### Running using python
#
# From the terminal type:
#
# ```
# python hello.py arg1 arg2 arg3
# ```
#
# You should see:
#
# ```
# hello ['hello.py', 'arg1', 'arg2', 'arg3']
# ```
#
# <div class='alert alert-info'>
# <strong>Note: </strong>The <i>sys.argv</i> statements returns the list of arguments, with the 1st element the name of the script.
# </div>
#
#
# ### Running using ipython
#
# Open `ipython` from the terminal, then type:
#
# ```
# run hello.py arg1 arg2 arg3
# ```
#
# To check the environment, type `whos`. You should see:
#
# ```
# In [2]: whos
# Variable Type Data/Info
# ------------------------------
# sys module <module 'sys' (built-in)>
# ```
#
# ### Running from Spyder
#
# Open `spyder`, open the file and click on the **Run -> Configuration per file** menu. Add arguments to the program as follows:
#
# <img src="figs/args_spyder.png" width="40%">
#
# Then, click on the **Run file** button to run all the program or the **Run selection** button to run the current line
#
# <br>
# <figure>
# <center>
# <img src="figs/run_file.png" width="50" text-align=center>
# <figcaption text-align=center><i>Run file button</i></figcaption>
# </figure>
#
# <br>
# <figure>
# <center>
# <img src="figs/run_sel.png" width="50">
# <figcaption text-align=center><i>Run selection button</i></figcaption>
# </figure>
|
[
"nicolas.barrier@ird.fr"
] |
nicolas.barrier@ird.fr
|
14a4fe4b25d073188b3eb9b8c1910bf1d861c999
|
97e06c68fe4ddb3a93ab665b6c8ae3d835eb484b
|
/catch/baidubaike_catch_data.py
|
64196fb01683723e6f1448a6e0a58b0462a3d1d7
|
[] |
no_license
|
baby-H/MachineStudy
|
2b4545ff9af00f9121210fc94469db1f60ad259a
|
4bfb3f9cc13ebbfbf6652d94697d87fd12b47179
|
refs/heads/master
| 2020-04-28T03:25:04.244351
| 2019-05-25T07:44:05
| 2019-05-25T07:44:05
| 174,936,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
# -*- coding: utf-8 -*-
# Author: Hu Ying Jie ( huyingjie2123@163.com )
from bs4 import BeautifulSoup
import urllib
from urllib import request
import re
import os
def get_html(url):
return urllib.request.urlopen(url, timeout=20).read().decode('utf-8')
temp_str = ''
n = 0
f = open(r'test.txt', 'r', encoding='utf8')
fp = open(r'word_0_out.txt', 'w+', encoding='utf8')
for line in f:
    if len(temp_str) > 400000:
        fp.close()
        n += 1  # the original '++n' is just unary plus twice in Python and never incremented n
        path = 'word_%d_out.txt' % n
        fp = open(path, 'w+', encoding='utf8')
        temp_str = ''  # reset the rollover buffer for the new output file
fp.write(line)
line = line[:-1]
print(line)
try:
url0 = "https://baike.baidu.com/item/"
url = url0 + urllib.parse.quote(str(line))
html = get_html(url)
soup = BeautifulSoup(html, 'html.parser')
if str(soup.title) == '<title>百度百科——全球最大中文百科全书</title>':
print('404')
continue
for text in soup.find_all('div', class_="para"):
for div_tag in text.find_all('div', class_="description"):
div_tag.decompose()
if text.span:
text.span.decompose()
new_str = "".join(text.get_text().split())
new_str = re.sub(r'\[[\d]*\]', '', new_str)
new_str = re.sub(r'\[[\d]*-[\d]\]', '', new_str)
temp_str = temp_str + new_str
fp.write(new_str)
print()
fp.write(u"\n")
except:
print('error')
continue
fp.close()
f.close()
|
[
"huyj@tongc-soft.com"
] |
huyj@tongc-soft.com
|
f879c61a6efd8aa975d90fa1e48070580dd1f2ae
|
8081310a546b0bd93abebbac5066c81b21c38482
|
/utils/helper.py
|
6aaa4e42e5327833afeea950df2eb77bfb12884c
|
[] |
no_license
|
Xiang-Deng-DL/GFKD
|
dd9130169a59216aed63e9fc22baabf758f15add
|
e68e4d6777af526f84ef1efab0a261cb8f5ac968
|
refs/heads/main
| 2023-07-06T18:56:45.224307
| 2021-08-13T17:37:55
| 2021-08-13T17:37:55
| 366,174,979
| 24
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,505
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 01:33:09 2020
@author: xiangdeng
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import copy
from model.GIN import GIN_dict
from model.GCN import GCN_dict
from Temp.dataset import GINDataset
from utils.GIN.full_loader import GraphFullDataLoader
from Temp.stru_dataset import STRDataset
from utils.GIN.data_loader import GraphDataLoader, collate
import os
def adjust_learning_rate(epoch, learning_rate, optimizer, model):
"""Sets the learning rate to the initial LR decayed by decay rate every steep step"""
if model=='gin':
step = int(epoch/700)
new_lr = learning_rate * (0.1 ** step)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
elif model=='gcn':
step = int(epoch/700)
new_lr = learning_rate * (0.1 ** step)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def nernulli_sample(params):
strus = []
for parm in params:
pro = torch.sigmoid(parm)
stru = (pro>=0.5).type(torch.int).cuda() #or torch.bernoulli(pro)
strus +=[stru]
return strus
def norm_loss(n, params, feature_paras, targets, criterion, self_loop, degree_as_label, net,
loss_r_feature_layers, bn_reg_scale, batch_size, gpu, onehot, onehot_cof):
strus = nernulli_sample(params)
if onehot:
graphfeatures = []
for fp in feature_paras:
fea = torch.softmax(fp,1)
graphfeatures += [fea]
else:
graphfeatures = feature_paras
dataset = STRDataset(strus, graphfeatures, targets, self_loop, degree_as_label)
train_loader = GraphFullDataLoader(dataset, batch_size=batch_size, device=gpu).train_loader()
for graphs, labels in train_loader:
labels = labels.cuda()
features = graphs.ndata['attr'].cuda()
outputs = net(graphs, features)
loss1 = criterion(outputs, labels)
loss_distr = sum([mod.r_feature for mod in loss_r_feature_layers])
loss = loss1 + bn_reg_scale*loss_distr
#print('start sample second')
for i in range(n-1):
strus = nernulli_sample(params)
dataset = STRDataset(strus, graphfeatures, targets, self_loop, degree_as_label)
train_loader = GraphFullDataLoader(dataset, batch_size=batch_size, device=gpu).train_loader()
for graphs, labels in train_loader:
labels = labels.cuda()
features = graphs.ndata['attr'].cuda()
outputs = net(graphs, features)
loss1 = criterion(outputs, labels)
loss_distr = sum([mod.r_feature for mod in loss_r_feature_layers])
curloss = loss1+bn_reg_scale*loss_distr
loss+=curloss
loss = loss/n
if onehot:
allfeatures = torch.cat(graphfeatures, dim=0)
b = allfeatures * torch.log(allfeatures)
h = -1.0 * b.sum()/len(allfeatures)
loss = loss + onehot_cof*h
return loss, strus
def generate_b(param):
num=len(param)
first =[]
second=[]
noise=[]
for i in range(num):
temparam=param[i]
noise_shape=temparam.shape
u_noise = torch.rand(size=noise_shape).cuda()
P1 = torch.sigmoid(-temparam)
E1 = (u_noise>P1).type(torch.int).cuda()
P2 = 1 - P1
E2 = (u_noise<P2).type(torch.int).cuda()
first+=[E1]
second+=[E2]
noise+=[u_noise]
return first, second, noise
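# generate_b draws a pair of antithetic Bernoulli samples E1/E2 from shared
# uniform noise u; bernulli_fastgrad below combines them into what is
# effectively an ARM-style gradient estimate (loss1 - loss2) * (u - 0.5) for
# the Bernoulli structure logits, avoiding backprop through the sampling step.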
def bernulli_fastgrad(params, feature_paras, targets, criterion_stru, self_loop, degree_as_label, net, batch_size, gpu,
onehot, loss_r_feature_layers, bn_reg_scale):
first, second, noise = generate_b(params)
if onehot:
graphfeatures = []
for fp in feature_paras:
fea = torch.softmax(fp,1)
graphfeatures += [fea]
else:
graphfeatures = feature_paras
grads = []
dataset1 = STRDataset(first, graphfeatures, targets, self_loop, degree_as_label)
train_loader1 = GraphFullDataLoader(dataset1, batch_size=batch_size, device=gpu).train_loader()
for graphs1, labels1 in train_loader1:
labels1 = labels1.cuda()
features1 = graphs1.ndata['attr'].cuda()
outputs1 = net(graphs1, features1)
loss_ce1 = criterion_stru(outputs1, labels1)
loss_distr1 = sum([mod.r_feature for mod in loss_r_feature_layers])*bn_reg_scale
dataset2 = STRDataset(second, graphfeatures, targets, self_loop, degree_as_label)
train_loader2 = GraphFullDataLoader(dataset2, batch_size=batch_size, device=gpu).train_loader()
for graphs2, labels2 in train_loader2:
labels2 = labels2.cuda()
features2 = graphs2.ndata['attr'].cuda()
outputs2 = net(graphs2, features2)
loss_ce2 = criterion_stru(outputs2, labels2)
loss_distr2 = sum([mod.r_feature for mod in loss_r_feature_layers])*bn_reg_scale
for i in range( len(noise) ):
grad = (loss_ce1[i]-loss_ce2[i] + loss_distr1-loss_distr2 )*(noise[i] - 0.5)
grads+=[grad]
return grads
def task_data(args):
# step 0: setting for gpu
if args.gpu >= 0:
torch.cuda.set_device(args.gpu)
# step 1: prepare dataset
dataset = GINDataset(args.dataset, args.self_loop, args.degree_as_label)
print(dataset.dim_nfeats)
# step 2: prepare data_loader
_, valid_loader = GraphDataLoader(
dataset, batch_size=32, device=args.gpu,
collate_fn=collate, seed=args.dataseed, shuffle=True,
split_name=args.split_name).train_valid_loader()
return dataset, valid_loader
def task_model(args, dataset):
# step 1: prepare model
assert args.tmodel in ['GIN', 'GCN']
assert args.smodel in ['GIN', 'GCN']
if args.tmodel == 'GIN':
modelt = GIN_dict[args.modelt](dataset)
elif args.tmodel == 'GCN':
modelt = GCN_dict[args.modelt](dataset)
else:
        raise ValueError('Not supporting such model!')  # raising a plain string is a TypeError in Python 3
if args.smodel == 'GIN':
models = GIN_dict[args.models](dataset)
elif args.smodel == 'GCN':
models = GCN_dict[args.models](dataset)
else:
        raise ValueError('Not supporting such model!')
modelt = modelt.cuda()
models = models.cuda()
return modelt, models
def evaluate(model, dataloader, loss_fcn):
model.eval()
total = 0
total_loss = 0
total_correct = 0
with torch.no_grad():
for data in dataloader:
graphs, labels = data
feat = graphs.ndata['attr'].cuda()
labels = labels.cuda()
total += len(labels)
outputs = model(graphs, feat)
_, predicted = torch.max(outputs.data, 1)
total_correct += (predicted == labels.data).sum().item()
loss = loss_fcn(outputs, labels)
total_loss += loss * len(labels)
loss, acc = 1.0 * total_loss / total, 1.0 * total_correct / total
return loss, acc
def generate_graphs(sturectures, features, targets, path, daseed, trial, modelname, bat_num, total_num):
graph_num = len(sturectures)
filep = path+modelname+'fake_mutag'+ str(daseed)+'_'+str(trial)+ '.txt'
if bat_num ==0:
open(filep, 'w').close()
with open(filep,'a') as f:
if bat_num==0:
tnum = str(total_num)
f.write(tnum)
f.write('\n')
for i in range(graph_num):
# node num and label
feas = features[i]
feas = torch.argmax(feas, 1)
feas = feas.to('cpu').numpy()
stru = sturectures[i]
node_number, label = stru.shape[0], targets[i]
label = str(label)
content = str(node_number)+' '+label
#content = content.replace('/n', ' ')
f.write(content)
f.write('\n')
#
for j in range(node_number):
cur_row = stru[j]
neig = ((cur_row == 1).nonzero())
neig = neig[neig!=j]
num = len(neig)
neig = neig.to('cpu').numpy()
'''if num>7:
neig = list(neig)
num = 7
neig = np.array(random.sample(neig, 7))'''
if num>0:
neig=str(neig)[1:-1]
else:
neig = str(neig)
#node_label = random.sample(range(0, 7), 1)[0]
node_label = feas[j]
node_inf = str(node_label)+' '+str(num)+' '+neig
node_inf = node_inf.replace('\n', ' ').replace('\r', ' ')
f.write(node_inf)
f.write('\n')
def create_folder(directory):
# from https://stackoverflow.com/a/273227
if not os.path.exists(directory):
os.makedirs(directory)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e69735542275999d2049a87b2ac118f4185c1903
|
5abdea0be9021f13909c38b09a68bde2d153b210
|
/src/imbalance_strategies.py
|
da7a33632423a1ac5fad4d93f683776a55ae6493
|
[] |
no_license
|
NWPU-IST/hbrPredictor
|
cdcccf0f900d6135f8bab355a71f9b8bc2f2c981
|
a67ca29f7191f816e8bc388449059984e1d86e81
|
refs/heads/master
| 2023-03-19T04:50:15.458017
| 2021-03-09T15:15:44
| 2021-03-09T15:15:44
| 326,311,570
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,948
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Junzheng Chen'
import random
import numpy as np
from sklearn.neighbors import NearestNeighbors
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import NearMiss
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.under_sampling import CondensedNearestNeighbour
from imblearn.combine import SMOTEENN
from imblearn.combine import SMOTETomek
class Smote:
def __init__(self, samples, N=10, k=5):
self.n_samples, self.n_attrs = samples.shape
self.N = N
self.k = k
self.samples = samples
self.newindex = 0
# self.synthetic=np.zeros((self.n_samples*N,self.n_attrs))
def over_sampling(self):
N = self.N
print(self.n_attrs)
self.synthetic = np.zeros((self.n_samples * N, self.n_attrs))
neighbors = NearestNeighbors(n_neighbors=self.k).fit(self.samples)
print('neighbors', neighbors)
for i in range(len(self.samples)):
nnarray = neighbors.kneighbors(self.samples[i].reshape(1, -1), return_distance=False)[0]
self._populate(N, i, nnarray)
return self.synthetic
# for each minority class samples,choose N of the k nearest neighbors and generate N synthetic samples.
def _populate(self, N, i, nnarray):
for j in range(N):
nn = random.randint(0, self.k - 1)
dif = self.samples[nnarray[nn]] - self.samples[i]
gap = random.random()
self.synthetic[self.newindex] = self.samples[i] + gap * dif
self.newindex += 1
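        # Each synthetic point above lies on the segment between a minority
        # sample and one of its k nearest neighbours:
        # x_new = x_i + gap * (x_nn - x_i), with gap uniform in [0, 1).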
def get_smote_result(data_list, label, N):
length = len(data_list)
postive_data = []
for i in range(0, length):
if label[i] == 1:
postive_data.append(data_list[i])
data_array = np.array(postive_data)
smoke = Smote(data_array, N, 5)
return smoke.over_sampling()
# Combination of over-and under-sampling methods
def get_cbs_smoteenn(data_list, label):
smo = SMOTEENN(random_state=42)
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
def get_cbs_smotetomek(data_list, label):
smo = SMOTETomek(random_state=42)
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
# Under sampling
def get_uds_rdm(data_list, label):
rdm = RandomUnderSampler()
X_rdm, y_rdm = rdm.fit_resample(data_list, label)
return X_rdm, y_rdm
def get_uds_nm(data_list, label):
nm = NearMiss()
X_nm, y_nm = nm.fit_resample(data_list, label)
return X_nm, y_nm
def get_uds_enn(data_list, label):
    enn = EditedNearestNeighbours()
    X_res, y_res = enn.fit_resample(data_list, label)
    return X_res, y_res
def get_uds_CNN(data_list, label):
    cnn = CondensedNearestNeighbour(random_state=42)
    X_res, y_res = cnn.fit_resample(data_list, label)
    return X_res, y_res
# Over sampling
def get_ovs_smote_standard(data_list, label):
smo = SMOTE(random_state=42)
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
def get_ovs_adasyn(data_list, label):
smo = ADASYN(random_state=42)
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
def get_ovs_smotenc(data_list, label):
smo = SMOTENC(random_state=42, categorical_features=[18, 19])
X_smo, y_smo = smo.fit_resample(data_list, label)
return X_smo, y_smo
def get_ovs_BorderlineSMOTE(data_list, label):
bd_smt = BorderlineSMOTE()
X_smo, y_smo = bd_smt.fit_resample(data_list, label)
return X_smo, y_smo
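# hand-rolled Borderline-SMOTE-1: oversample every positive, let the supplied
# classifier vote on the synthetic points, mark originals as noise (all votes
# negative) or danger (majority negative), drop the noise and synthesise new
# positives only around the danger (borderline) points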
def get_ovs_smote_borderline_1(clf, data, label, m, s, k=5):
label_local = label[:]
clf.fit(data, label_local)
data_list = data.tolist()
data_list = data_list[:]
length = len(data_list)
T = np.array(data_list)
n_samples, n_attrs = T.shape
# get p list
P = []
for i in range(0, length):
if label_local[i] == 1:
P.append(i)
n_samples = len(P)
# calc m for all the positive sample
neighbors = NearestNeighbors(n_neighbors=k).fit(T)
synthetic = np.zeros((n_samples * m, n_attrs))
newindex = 0
for i in range(len(P)):
nnarray = neighbors.kneighbors(T[P[i]].reshape(1, -1), return_distance=False)[0]
for j in range(m):
nn = random.randint(0, k - 1)
dif = T[nnarray[nn]] - T[P[i]]
gap = random.random()
synthetic[newindex] = T[P[i]] + gap * dif
newindex += 1
pred = []
danger = []
noise = []
for i in range(0, n_samples * m):
pred.append(clf.predict(synthetic[i].reshape(1, -1)))
for i in range(0, len(pred)):
if i % 5 != 0:
continue
count = 0
for j in range(0, 5):
if i + j >= len(pred) - 1:
continue
if pred[i + j] == 0:
count += 1
if count == 5:
noise.append(P[int(i / 5)])
elif count > 2:
danger.append(P[int(i / 5)])
n_samples_danger = len(danger)
# calc m for all the positive sample
danger_list = []
for i in danger:
danger_list.append(T[i])
if not danger_list:
result = []
result.append(data_list)
result.append(label)
return result
neighbors = NearestNeighbors(n_neighbors=k).fit(danger_list)
synthetic_danger = np.zeros((n_samples_danger * s, n_attrs), dtype=float)
newindex_danger = 0
for i in range(len(danger)):
if 5 > len(danger):
result = []
result.append(data_list)
result.append(label)
return result
nnarray = neighbors.kneighbors(T[danger[i]].reshape(1, -1), return_distance=False)[0]
for j in range(m):
nn = random.randint(0, k - 1)
dif = T[nnarray[nn]] - T[danger[i]]
gap = random.random()
synthetic_danger[newindex_danger] = T[danger[i]] + gap * dif
newindex_danger += 1
synthetic_danger_list = synthetic_danger.tolist()
noise.reverse()
# remove the noise points detected above
for i in range(0,len(noise)):
del data_list[noise[i]]
del label_local[noise[i]]
# insert the synthetic positives at random positions
random_list = []
for i in range(0, len(synthetic_danger_list)):
random_list.append(int(random.random() * len(data_list)))
for i in range(0, len(random_list)):
data_list.insert(random_list[i], synthetic_danger_list[i])
label_local.insert(random_list[i], 1)
result = []
result.append(data_list)
result.append(label_local)
return result
|
[
"noreply@github.com"
] |
noreply@github.com
|
6000fbee425fac253a92896202dbe378a0ccf7ea
|
e50994cf741d5221080cc5c4d7a5e53e43d58b36
|
/20190119/testcase2/test3.py
|
5c1a53666c46fa65064b98a62bfa812c154edbd9
|
[
"Apache-2.0"
] |
permissive
|
sly1314sly/selenium_basic
|
5fff7f9aa11d95d892ebfe013007bc5aaba2ea84
|
53bc2bf4d8a81bcd71f7fe5910cbc34ecfc6869a
|
refs/heads/master
| 2020-04-13T08:49:01.812269
| 2020-01-10T09:29:10
| 2020-01-10T09:29:10
| 163,092,788
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
import unittest
class TestStringMethods3(unittest.TestCase):
def test_upper3(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_isupper3(self):
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split3(self):
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
if __name__ == '__main__':
unittest.main()
|
[
"928446761@qq.com"
] |
928446761@qq.com
|
9cfcec30b699e1871fb85a7b2b0c628a6a1052da
|
2502facbf895f3e27b3ce77b223452171ab1d532
|
/utilities/customLogger.py
|
be6c5325a9ac366defb8ba9f630e35b0df58ac5b
|
[] |
no_license
|
Kavya1709/nopCommerce
|
538dac6e9f62dd8fa716a77bde4c119347d49c8d
|
024c149a2bf5caa493c82929feb6497fe22eda8a
|
refs/heads/master
| 2022-12-17T19:49:41.865873
| 2020-09-11T13:38:55
| 2020-09-11T13:38:55
| 294,701,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
import logging
class LogGen:
@staticmethod
def loggen():
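# drop any handlers installed earlier so basicConfig below takes effect on re-runs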
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(filename='.\\Logs\\automation.log', format='%(asctime)s: %(levelname)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
return logger
# @staticmethod
# def log_gen():
#
# LOG_FILENAME = '.\\Logs\\automation.log'
#
# # Set up a specific logger with our desired output level
# my_logger = logging.getLogger('nopCommerce')
# my_logger.setLevel(logging.INFO)
#
# # Add the log message handler to the logger
# handler = logging.handlers.RotatingFileHandler(
# LOG_FILENAME, maxBytes=20, backupCount=5)
#
# my_logger.addHandler(handler)
#
# return my_logger
|
[
"kavyaprakash17@gmail.com"
] |
kavyaprakash17@gmail.com
|
edc58be22d8da0892938e5e2414d522b0016bddc
|
9b9ae524386d16396a8c69d671d1e588ac1fb7f9
|
/week13/maoyan/maoyan/items.py
|
25ca9521abf8bf9253dec44bfa2263b81e6b7382
|
[] |
no_license
|
WeiZhixiong/Python006-006
|
d4d15977ac3f44566123cb029ae2015e2d24f133
|
6263b78f211c66332c27949bacadd28f6f19ffdb
|
refs/heads/main
| 2023-04-15T10:35:42.376722
| 2021-04-29T05:25:50
| 2021-04-29T05:25:50
| 322,471,510
| 0
| 0
| null | 2020-12-18T02:52:30
| 2020-12-18T02:52:30
| null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class MaoYanItem(scrapy.Item):
movie_name = scrapy.Field()
movie_type = scrapy.Field()
show_time = scrapy.Field()
|
[
"zhixiong.wei@qq.com"
] |
zhixiong.wei@qq.com
|
8ff2bd1fe3bcae9eaddea886a981234c02e1ccc7
|
8faa47c6b03940bc382a654fcc5ac99babd99d4e
|
/auth.py
|
9cbb74114c71c49a43a0eb158b05868fef3f1d79
|
[] |
no_license
|
TobiasGrosch/sprint-velocity-planning
|
7262038cf2359f0d55834c9333386be1342f947c
|
e575416fcc3bb70ebe9d1c0702c1d2a6c4606bdd
|
refs/heads/master
| 2023-04-19T07:22:35.989809
| 2021-04-25T06:47:53
| 2021-04-25T06:47:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,050
|
py
|
import json
from flask import request, _request_ctx_stack, abort
from functools import wraps
from jose import jwt
from urllib.request import urlopen
AUTH0_DOMAIN = 'fsnd-groscht.eu.auth0.com'
ALGORITHMS = ['RS256']
API_AUDIENCE = 'sprint_vel'
## AuthError Exception
'''
AuthError Exception
A standardized way to communicate auth failure modes
'''
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
## Auth Header
def get_token_auth_header():
"""Obtains the Access Token from the Authorization Header
"""
auth = request.headers.get('Authorization', None)
if not auth:
raise AuthError({
'code': 'authorization_header_missing',
'description': 'Authorization header is expected.'
}, 401)
parts = auth.split()
if parts[0].lower() != 'bearer':
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must start with "Bearer".'
}, 401)
elif len(parts) == 1:
raise AuthError({
'code': 'invalid_header',
'description': 'Token not found.'
}, 401)
elif len(parts) > 2:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must be bearer token.'
}, 401)
token = parts[1]
return token
def check_permissions(permission, payload):
if 'permissions' not in payload:
raise AuthError({
'code': 'invalid_claims',
'description': 'Permissions not included in JWT.'
}, 400)
if permission not in payload['permissions']:
raise AuthError({
'code': 'unauthorized',
'description': 'Permission not found.'
}, 401)
return True
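# fetch Auth0's JWKS, match the token's kid, then verify signature, expiry and claims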
def verify_decode_jwt(token):
jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')
jwks = json.loads(jsonurl.read())
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
if 'kid' not in unverified_header:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization malformed.'
}, 401)
for key in jwks['keys']:
if key['kid'] == unverified_header['kid']:
rsa_key = {
'kty': key['kty'],
'kid': key['kid'],
'use': key['use'],
'n': key['n'],
'e': key['e']
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=API_AUDIENCE,
issuer='https://' + AUTH0_DOMAIN + '/'
)
return payload
except jwt.ExpiredSignatureError:
raise AuthError({
'code': 'token_expired',
'description': 'Token expired.'
}, 401)
except jwt.JWTClaimsError:
raise AuthError({
'code': 'invalid_claims',
'description': 'Incorrect claims. Please, check the audience and issuer.'
}, 401)
except Exception:
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to parse authentication token.'
}, 400)
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to find the appropriate key.'
}, 400)
def requires_auth(permission=''):
def requires_auth_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
token = get_token_auth_header()
try:
payload = verify_decode_jwt(token)
except:
abort(401)
check_permissions(permission, payload)
return f(payload, *args, **kwargs)
return wrapper
return requires_auth_decorator
|
[
"ubuntu@ip-10-101-1-61.eu-central-1.compute.internal"
] |
ubuntu@ip-10-101-1-61.eu-central-1.compute.internal
|
40b5a7f814ed68cbc12969cb867747a1687e0e1b
|
ac1e60fd4bb3b7cc04e413ae394836abad8947b1
|
/email_verification_api/wsgi.py
|
e60483842d64ef833b28dfd12be0cfe5d6bf9eba
|
[] |
no_license
|
Taycode/email-verification-api
|
9c48642f34671232c388a7c763541f02ff9ae614
|
f3abe35a010d5b2d3d2c269fa728eb40f26630a0
|
refs/heads/master
| 2020-08-04T11:00:29.103892
| 2019-10-01T14:49:14
| 2019-10-01T14:49:14
| 212,114,710
| 0
| 0
| null | 2019-10-01T14:16:59
| 2019-10-01T14:16:58
| null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
"""
WSGI config for email_verification_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'email_verification_api.settings')
application = get_wsgi_application()
|
[
"tay2druh@gmail.com"
] |
tay2druh@gmail.com
|
08273d87152e339e41af2407ff4bbad8cc28e79c
|
f2b91692a434ee79ff5d68ed3111d60d90315f00
|
/src/command_modules/azure-cli-servicebus/azure/cli/command_modules/servicebus/_validators.py
|
6a4509e9f662b17fe8494f89fce3441aa9719205
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cal5barton/azure-cli
|
f883bc7d481b163d4c4af1fa154a990182e5de80
|
6ebc6f810f32b8fce30a360633a70fcfdea15e7b
|
refs/heads/dev
| 2023-05-24T18:12:36.151238
| 2018-07-12T16:16:29
| 2018-07-12T16:16:29
| 140,749,210
| 0
| 0
|
MIT
| 2023-05-15T18:58:31
| 2018-07-12T18:13:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,322
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=unused-variable
import re
from datetime import timedelta
from isodate import parse_duration
from knack.util import CLIError
# PARAMETER VALIDATORS
# Type ISO 8061 duration
iso8601pattern = re.compile("^P(?!$)(\\d+Y)?(\\d+M)?(\\d+W)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+.)?(\\d+S)?)?$")
timedeltapattern = re.compile("^\\d+:\\d+:\\d+$")
def _validate_lock_duration(namespace):
if namespace.lock_duration:
if iso8601pattern.match(namespace.lock_duration):
if parse_duration(namespace.lock_duration) > timedelta(days=0, minutes=6, seconds=0):
raise CLIError(
'--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
namespace.lock_duration))
elif timedeltapattern.match(namespace.lock_duration):
day, minute, seconds = namespace.lock_duration.split(":")
if int(day) > 0 or int(minute) > 6:
raise CLIError(
'--lock-duration Value Error : {0} value, The maximum value for LockDuration is 5 minutes; the default value is 1 minute.'.format(
namespace.lock_duration))
else:
raise CLIError('--lock-duration Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g.'
' PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.lock_duration))
def _validate_default_message_time_to_live(namespace):
if namespace.default_message_time_to_live:
if not iso8601pattern.match(namespace.default_message_time_to_live) and not timedeltapattern.match(namespace.default_message_time_to_live):
raise CLIError('--default-message-time-to-live Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.default_message_time_to_live))
def _validate_duplicate_detection_history_time_window(namespace):
if namespace.duplicate_detection_history_time_window:
if iso8601pattern.match(namespace.duplicate_detection_history_time_window):
pass
elif timedeltapattern.match(namespace.duplicate_detection_history_time_window):
pass
else:
raise CLIError('--duplicate-detection-history-time-window Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.duplicate_detection_history_time_window))
def _validate_auto_delete_on_idle(namespace):
if namespace.auto_delete_on_idle:
if iso8601pattern.match(namespace.auto_delete_on_idle):
pass
elif timedeltapattern.match(namespace.auto_delete_on_idle):
pass
else:
raise CLIError('--auto-delete-on-idle Value Error : {0} value is not in ISO 8601 timespan / duration format. e.g. PT10M for duration of 10 min or 00:10:00 for duration of 10 min'.format(namespace.auto_delete_on_idle))
def validate_partner_namespace(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
if namespace.partner_namespace:
if not is_valid_resource_id(namespace.partner_namespace):
namespace.partner_namespace = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.ServiceBus',
type='namespaces',
name=namespace.partner_namespace)
def validate_premiumsku_capacity(namespace):
if namespace.sku and namespace.sku != 'Premium' and namespace.capacity:
raise CLIError('--capacity - This property is only applicable to namespaces of Premium SKU')
|
[
"tjprescott@users.noreply.github.com"
] |
tjprescott@users.noreply.github.com
|
9ec5875503577bf114e6521a6174ca229c968b95
|
c1e0874f55d05ee990ed2d637c2910701b32d246
|
/soft_uni_OOP/Defining Classes/lab/scope_mess_3.py
|
03d81f4774c92bdc435a7583da245e72d79f8461
|
[] |
no_license
|
borislavstoychev/Soft_Uni
|
5d047bef402c50215e0abc825476326889ffd0be
|
ccc0b2fb18f8ad6809b475eb20e82a9e4eb4b0b0
|
refs/heads/master
| 2023-05-11T12:27:08.672058
| 2021-05-28T18:00:10
| 2021-05-28T18:00:10
| 277,556,731
| 3
| 2
| null | 2021-02-11T19:57:37
| 2020-07-06T13:58:23
|
Python
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
x = "global"
def outer():
x = "local"
def inner():
nonlocal x
x = "nonlocal"
print("inner:", x)
def change_global():
global x
x = "global: changed!"
print("outer:", x)
inner()
print("outer:", x)
change_global()
print(x)
outer()
print(x)
|
[
"stoy4ew@gmail.com"
] |
stoy4ew@gmail.com
|
00cbe7556cb10b9d98cdd70f69f8f9392fef4b9a
|
325512acc574dffa50d3d7d9645391e2928df127
|
/main.py
|
137f8ee5729044c1f7a1566aec02584d702c168f
|
[] |
no_license
|
sunca7/ff
|
8366d4d6d7c7f22201ae9c324960a16dea045437
|
a6f43f55dc7ed1602908f58c3710cf0b1846b8d9
|
refs/heads/master
| 2022-12-01T02:57:12.815321
| 2020-08-04T14:35:41
| 2020-08-04T14:35:41
| 285,006,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,190
|
py
|
import os
import requests
from flask import Flask, render_template, request
from bs4 import BeautifulSoup
from save import save_to_file
LIMIT = 20
db = {}
level_four_db = [{'company': '삼성전자', 'company_url': '/item/main.nhn?code=005930'}, {'company': 'NAVER', 'company_url': '/item/main.nhn?code=035420'}, {'company': '셀트리온', 'company_url': '/item/main.nhn?code=068270'}, {'company': '현대모비스', 'company_url': '/item/main.nhn?code=012330'}, {'company': '엔씨소프트', 'company_url': '/item/main.nhn?code=036570'}, {'company': 'POSCO', 'company_url': '/item/main.nhn?code=005490'}, {'company': 'KB금융', 'company_url': '/item/main.nhn?code=105560'}, {'company': '신한지주', 'company_url': '/item/main.nhn?code=055550'}, {'company': 'KT&G', 'company_url': '/item/main.nhn?code=033780'}, {'company': '넷마블', 'company_url': '/item/main.nhn?code=251270'}, {'company': '삼성생명', 'company_url': '/item/main.nhn?code=032830'}, {'company': '하나금융지주', 'company_url': '/item/main.nhn?code=086790'}, {'company': '삼성화재', 'company_url': '/item/main.nhn?code=000810'}, {'company': '고려아연', 'company_url': '/item/main.nhn?code=010130'}, {'company': '우리금융지주', 'company_url': '/item/main.nhn?code=316140'}, {'company': '기업은행', 'company_url': '/item/main.nhn?code=024110'}, {'company': '미래에셋대우', 'company_url': '/item/main.nhn?code=006800'}, {'company': '포스코케미칼', 'company_url': '/item/main.nhn?code=003670'}, {'company': '현대글로비스', 'company_url': '/item/main.nhn?code=086280'}, {'company': '현대건설', 'company_url': '/item/main.nhn?code=000720'}, {'company': '유한양행', 'company_url': '/item/main.nhn?code=000100'}, {'company': 'DB손해보험', 'company_url': '/item/main.nhn?code=005830'}, {'company': '삼성카드', 'company_url': '/item/main.nhn?code=029780'}, {'company': '삼성증권', 'company_url': '/item/main.nhn?code=016360'}, {'company': 'NH투자증권', 'company_url': '/item/main.nhn?code=005940'}, {'company': '일진머티리얼즈', 'company_url': '/item/main.nhn?code=020150'}, {'company': '농심', 'company_url': '/item/main.nhn?code=004370'}, {'company': '메리츠증권', 'company_url': '/item/main.nhn?code=008560'}, {'company': '현대해상', 'company_url': '/item/main.nhn?code=001450'}, {'company': '제일기획', 'company_url': '/item/main.nhn?code=030000'}, {'company': '동서', 'company_url': '/item/main.nhn?code=026960'}, {'company': 'LS ELECTRIC', 'company_url': '/item/main.nhn?code=010120'}, {'company': 'BNK금융지주', 'company_url': '/item/main.nhn?code=138930'}, {'company': '한올바이오파마', 'company_url': '/item/main.nhn?code=009420'}, {'company': '종근당', 'company_url': '/item/main.nhn?code=185750'}, {'company': 'HDC현대산업개발', 'company_url': '/item/main.nhn?code=294870'}, {'company': 'DB하이텍', 'company_url': '/item/main.nhn?code=000990'}, {'company': '한전KPS', 'company_url': '/item/main.nhn?code=051600'}, {'company': '영원무역', 'company_url': '/item/main.nhn?code=111770'}, {'company': '한국테크놀로지그룹', 'company_url': '/item/main.nhn?code=000240'}, {'company': '이노션', 'company_url': '/item/main.nhn?code=214320'}, {'company': '영풍', 'company_url': '/item/main.nhn?code=000670'}, {'company': '쿠쿠홈시스', 'company_url': '/item/main.nhn?code=284740'}, {'company': '보령제약', 'company_url': '/item/main.nhn?code=003850'}, {'company': '휴켐스', 'company_url': '/item/main.nhn?code=069260'}, {'company': '빙그레', 'company_url': '/item/main.nhn?code=005180'}, {'company': '락앤락', 'company_url': '/item/main.nhn?code=115390'}, {'company': '쿠쿠홀딩스', 'company_url': '/item/main.nhn?code=192400'}, {'company': '세방전지', 'company_url': '/item/main.nhn?code=004490'}]
company_list_url = f"https://finance.naver.com/sise/entryJongmok.nhn?&page="
company_info_base_url = "https://finance.naver.com"
kosdaq_list_url = "https://finance.naver.com/sise/sise_market_sum.nhn?sosok=1&page="
app = Flask("ff")
# os.system('cls' if os.name=='nt' else 'clear')
def request_company_list():
company_list = []
# for nb in range(1,21):
for nb in range(1,21):
resp = requests.get(f"{company_list_url}{nb}")
soup = BeautifulSoup(resp.text, "html.parser")
company_table_list = soup.find_all("td", {"class":"ctg"})
for info in company_table_list:
company = info.find('a').text
company_url = info.find('a')["href"]
company_list.append({"company": company, "company_url": company_url})
return company_list
def extract_indiv_table(company):
request_company_info = requests.get(f"{company_info_base_url}{company.get('company_url')}")
soup = BeautifulSoup(request_company_info.text, "html.parser")
info_table = soup.find("table", {"class": "tb_type1_ifrs"})
info_table_row = info_table.find_all("tr")
return info_table_row
def level_one_extract(company):
info_table_row = extract_indiv_table(company)
net_income_ten = info_table_row[5].find_all("td")
for net_income in net_income_ten:
if "-" in net_income.text.strip():
return -1
else:
result = 1
return result
def level_one_company(company_list):
print("request level one")
level_one = []
# for company in company_list:
# for company in company_list[:1]:
for company in company_list:
one = level_one_extract(company)
if one == 1:
level_one.append(company)
print("level 1 len: ", len(level_one))
return level_one
def four_extract_company(company):
info_table_row = extract_indiv_table(company)
debt_ratio_ten = info_table_row[9].find_all("td")
for debt_ratio in debt_ratio_ten:
nbr = debt_ratio.text.strip().split('.')[0].replace(',','')
if nbr != '':
if int(nbr) > 100:
return -4
else:
result = 4
return result
def four_second_extract_company(company):
info_table_row = extract_indiv_table(company)
checking_ratio_ten = info_table_row[10].find_all("td")
for checking_ratio in checking_ratio_ten:
nbr = checking_ratio.text.strip().split('.')[0].replace(',','')
if nbr != '':
if int(nbr) < 100:
return -4
else:
result = 4
return result
def level_four_company(company_list):
print("request level four")
level_four = []
for company in company_list:
four = four_extract_company(company)
four = four_second_extract_company(company)
if four == 4:
level_four.append(company)
print("level 4 len: ", len(level_four))
return level_four
def six_extract_company(company):
pass
def request_kosdaq():
kosdaq_list = []
for nb in range(1,5):
print(f"{nb}")
request_kosdaq_list = requests.get(f"{kosdaq_list_url}{nb}")
soup = BeautifulSoup(request_kosdaq_list.text, "html.parser")
kosdaq_table_list = soup.find_all("tr")
for info in kosdaq_table_list[7:-1]:
if info.find("a"):
company = info.find('a').text
company_url = info.find('a')["href"]
kosdaq_list.append({"company": company, "company_url": company_url})
else :
continue
print(kosdaq_list)
return kosdaq_list
def ff_program(company_list):
level_one = level_one_company(company_list)
level_four = level_four_company(level_one)
return level_four
# @app.route("/")
# def kospi():
# print("Level four len", len(level_four_db))
# print(level_four_db)
# save_to_file(level_four_db)
# return render_template("index.html", level_four=level_four_db)
# def kosdaq():
# kosdaq_list = request_kosdaq()
# return kosdaq_list
@app.route("/")
def financial_freedom():
# kospi_list = request_company_list()
kosdaq_list = request_kosdaq()
kosdaq_level_four = ff_program(kosdaq_list)
print(kosdaq_level_four)
save_to_file(kosdaq_level_four)
return "Done"
# app.run(host="0.0.0.0")
financial_freedom()
# if "company" not in db:
# print("request company")
# db["company"] = request_company_list()
# company_list = db["company"]
# if "one" not in db:
# print("request level one")
# db["one"] = level_one_company(company_list)
# level_one = db["one"]
# print("Level one len ", len(level_one))
#
|
[
"kosah302@gmail.com"
] |
kosah302@gmail.com
|
84c1041757c13e9f14ac643dc4bfae4d661fad21
|
04a17e0f43befad7be3518a99eadbf5123e31dd3
|
/semana5/MyHolidays/myholidays/holidays.py
|
b49f9254358e14edfca59fffdb88133641e5f8d4
|
[] |
no_license
|
grcopia/NappAcademy
|
587b49263e704b56d804567ec1efadc1c72bf0c0
|
64f3732c3ca4337c4811bee3736be61b6169985a
|
refs/heads/master
| 2023-04-23T04:47:45.789203
| 2021-04-17T18:33:32
| 2021-04-17T18:33:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,427
|
py
|
from datetime import date
from datetime import datetime
from dateutil.parser import parse
class MyCalendar:
def __init__(self, *args):
self.datas = []
self.check_holiday()
for item in args:
if isinstance(item, date):
self.datas.append(item)
elif type(item) == str:
if len(str(item).split('/')) > 2:
if int(str(item).split('/')[1]) > 12 or len(str(item).split('/')[0]) > 2:
continue
self.datas.append(parse(item).date())
else:
pass
# self.datas = [parse(str(item)) for item in args if type(item) not in types and len(str(item).split('/')) > 2]
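# True when the first successfully parsed date falls on a weekend, False otherwise (including invalid input)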
def check_holiday(self, *args):
check_day = []
for data in args:
print(data)
if isinstance(data, date):
check_day.append(data)
continue
if len(str(data).split('/')[0]) > 2:
check_day.append('invalido')
continue
if type(data) == str:
if int(str(data).split('/')[1]) > 12 or len(str(data)) <= 5:
check_day.append('invalido')
else:
check_day.append(parse(data))
else:
pass
for day in check_day:
print(day)
if day == 'invalido':
return False
if day.weekday() == 6 or day.weekday() == 5:
return True
else:
return False
def add_holiday(self, *args):
for item in args:
if type(item) == str:
if int(str(item).split('/')[1]) > 12 or len(str(item).split('/')[0]) >= 3:
continue
if parse(str(item)).date() in self.datas:
pass
elif isinstance(item, date):
self.datas.append(item)
elif type(item) == str:
self.datas.append(parse(item).date())
if __name__ == '__main__':
dt1 = '15/15/2021'
dt2 = '120/3/2021'
dt3 = '15/03/2021'
dt4 = '15/05'
dt5 = '24/24/2021'
objeto = MyCalendar(dt1, dt2)
assert objeto.check_holiday(dt1) is False
assert objeto.check_holiday(dt2) is False
assert objeto.check_holiday(dt3) is False
assert objeto.check_holiday(dt4) is False
assert objeto.check_holiday(dt5) is False
|
[
"vagnerpelais@gmail.com"
] |
vagnerpelais@gmail.com
|
6f4fda92cc404753602829b7b45f67c7d15c83ed
|
d3cc5966bccf06dd733bc6b51a0c4d9d8fc1baec
|
/ingest/ingest-equipment.py
|
0c39157df34390570655eea8020c053643c2272b
|
[] |
no_license
|
zhongh/Additive-Manufacturing-Processing-Ontology
|
0229464dfe9e474b1e2b50abefe48a6436cb4569
|
a729aef5db049de13718c9b4de56ad93f7ec7985
|
refs/heads/master
| 2020-03-23T15:18:22.076341
| 2016-11-30T21:11:26
| 2016-11-30T21:11:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,639
|
py
|
__author__ = 'congrui_li'
from SPARQLWrapper import SPARQLWrapper, JSON
from rdflib import Namespace, RDF
import json
import requests
import multiprocessing
from itertools import chain
import argparse
from Maybe import *
def load_file(filepath):
with open(filepath) as _file:
return _file.read().replace('\n', " ")
AMPO = Namespace("https://tw.rpi.edu/web/project/ampo#")
SIO = Namespace("http://semanticscience.org/ontology/sio.owl#")
FOAF = Namespace("http://xmlns.com/foaf/0.1/")
PROV = Namespace("http://www.w3.org/ns/prov#")
QUDT = Namespace("http://data.qudt.org/qudt/owl/1.0.0/qudt.owl#")
VITRO = Namespace("http://vitro.mannlib.cornell.edu/ns/vitro/0.7#")
BIBO = Namespace("http://purl.org/ontology/bibo/")
VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
VIVO = Namespace('http://vivoweb.org/ontology/core#')
get_equipment_query = load_file("queries/listEquip.rq")
describe_equipment_query = load_file("queries/describeEquip.rq")
# standard filters
non_empty_str = lambda s: True if s else False
has_label = lambda o: True if o.label() else False
def get_metadata(id):
return {"index": {"_index": "ampo", "_type": "equipment", "_id": id}}
def select(endpoint, query):
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
return results["results"]["bindings"]
def describe(endpoint, query):
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
try:
return sparql.query().convert()
except RuntimeWarning:
pass
def has_type(resource, type):
for rtype in resource.objects(RDF.type):
if str(rtype.identifier) == str(type):
return True
return False
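# list every ampo equipment URI, then DESCRIBE each one to build its document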
def get_equipment(endpoint):
r = select(endpoint, get_equipment_query)
return [rs["equipment"]["value"] for rs in r]
def describe_equipment(endpoint, equipment):
q = describe_equipment_query.replace("?equipment", "<" + equipment + ">")
return describe(endpoint, q)
def get_most_specific_type(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(VITRO.mostSpecificType)) \
.map(lambda t: t.label()) \
.filter(non_empty_str) \
.one().value
def get_processes(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.isParticipantIn)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_larger_equip(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.isPartOf)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_smaller_equip(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.hasPart)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_inputs(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.hasInput)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def get_attrs(equipment):
return Maybe.of(equipment).stream() \
.flatmap(lambda p: p.objects(AMPO.hasAttribute)) \
.filter(has_label) \
.map(lambda r: {"uri": str(r.identifier), "name": str(r.label())}).list()
def create_equipment_doc(equipment, endpoint):
graph = describe_equipment(endpoint=endpoint, equipment=equipment)
equ = graph.resource(equipment)
try:
name = equ.label()
except AttributeError:
print("missing name:", equipment)
return {}
doc = {"uri": equipment, "name": name}
most_specific_type = get_most_specific_type(equ)
if most_specific_type:
doc.update({"mostSpecificType": most_specific_type})
processes = get_processes(equ)
if processes:
new_processes = sorted(processes, key=lambda k: k['name'])
doc.update({"process": new_processes[0]})
larger_equip = get_larger_equip(equ)
if larger_equip:
doc.update({"largerEquip": larger_equip})
smaller_equip = get_smaller_equip(equ)
if smaller_equip:
doc.update({"smallerEquip": smaller_equip})
inputs = get_inputs(equ)
if inputs:
doc.update({"input": inputs})
attrs = get_attrs(equ)
if attrs:
doc.update({"attr": attrs})
return doc
def process_equipment(equipment, endpoint):
equ = create_equipment_doc(equipment=equipment, endpoint=endpoint)
es_id = equ["uri"]
return [json.dumps(get_metadata(es_id)), json.dumps(equ)]
def publish(bulk, endpoint, rebuild, mapping):
# if configured to rebuild_index
# Delete and then re-create to publication index (via PUT request)
index_url = endpoint + "/ampo"
if rebuild:
requests.delete(index_url)
r = requests.put(index_url)
if r.status_code != requests.codes.ok:
print(r.url, r.status_code)
r.raise_for_status()
# push current publication document mapping
mapping_url = endpoint + "/ampo/equipment/_mapping"
with open(mapping) as mapping_file:
r = requests.put(mapping_url, data=mapping_file)
if r.status_code != requests.codes.ok:
# new mapping may be incompatible with previous
# delete current mapping and re-push
requests.delete(mapping_url)
r = requests.put(mapping_url, data=mapping_file)
if r.status_code != requests.codes.ok:
print(r.url, r.status_code)
r.raise_for_status()
# bulk import new publication documents
bulk_import_url = endpoint + "/_bulk"
r = requests.post(bulk_import_url, data=bulk)
if r.status_code != requests.codes.ok:
print(r.url, r.status_code)
r.raise_for_status()
def generate(threads, sparql):
pool = multiprocessing.Pool(threads)
params = [(equipment, sparql) for equipment in get_equipment(endpoint=sparql)]
return list(chain.from_iterable(pool.starmap(process_equipment, params)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--threads', default=8, help='number of threads to use (default = 8)')
parser.add_argument('--es', default="http://localhost:9200", help="elasticsearch service URL")
parser.add_argument('--publish', default=False, action="store_true", help="publish to elasticsearch?")
parser.add_argument('--rebuild', default=False, action="store_true", help="rebuild elasticsearch index?")
parser.add_argument('--mapping', default="mappings/equipment.json", help="publication elasticsearch mapping document")
parser.add_argument('--sparql', default='https://dofamp.tw.rpi.edu/fuseki/ampo/query', help='sparql endpoint')
parser.add_argument('out', metavar='OUT', help='elasticsearch bulk ingest file')
args = parser.parse_args()
# generate bulk import document for publications
records = generate(threads=int(args.threads), sparql=args.sparql)
# save generated bulk import file so it can be backed up or reviewed if there are publish errors
with open(args.out, "w") as bulk_file:
bulk_file.write('\n'.join(records)+'\n')
# publish the results to elasticsearch if "--publish" was specified on the command line
if args.publish:
bulk_str = '\n'.join(records)+'\n'
publish(bulk=bulk_str, endpoint=args.es, rebuild=args.rebuild, mapping=args.mapping)
|
[
"lic10@rpi.edu"
] |
lic10@rpi.edu
|
490df8c8807c725fdf915ccba2ff1496bd0ac937
|
60cb975f3e0251c73c457271bce8a7b2036e422b
|
/studysrc/mytest/websppider/transtest.py
|
23c308fa3b1c83bba1c6cd379e0c29e746a2f19d
|
[] |
no_license
|
49257620/reboot
|
0a2341f23bc1a6f3ae47b59f772919228c623544
|
86b348228d1a25d78c45b0e9022d7c773544373b
|
refs/heads/master
| 2018-11-17T19:19:58.969710
| 2018-09-25T03:15:57
| 2018-09-25T03:15:57
| 125,727,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
# encoding: utf-8
# Author: LW
import urllib.request
import urllib.parse
import time
import random
import hashlib
content = 'what fuck'
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
data = {}
'''
1523493384696
1523493371204
351ac046404e1bbcb9442615f964a96d
cb2731255a15489013919b3788953bdc
'''
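# the web client signs each request with md5(client + query + salt + secret)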
u = 'fanyideskweb'
d = content
f = str(int(time.time()*1000) + random.randint(1,10))
c = 'ebSeFb%=XZ%T[KZ)c(sy!'
sign = hashlib.md5((u + d + f + c).encode('utf-8')).hexdigest()
print(f)
print(sign)
data['i'] = content
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['salt'] = f
data['sign'] = sign
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_CLICKBUTTION'
data['typoResult'] = 'false'
data = urllib.parse.urlencode(data).encode('utf-8')
response = urllib.request.urlopen(url, data)
html = response.read().decode('utf-8')
print(html)
|
[
"49257620@qq.com"
] |
49257620@qq.com
|
fbb7c0b773c663b598397c813719054f055a6897
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/lib/python2.7/site-packages/openpyxl/worksheet/pivot.py
|
b1905be6298ea1c57f774cae821fbc482b8bf25b
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,984
|
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors import (
Bool,
Integer,
String,
Set,
NoneSet,
)
from openpyxl.descriptors.serialisable import Serialisable
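# descriptor-backed serialisable models for worksheet pivot-table parts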
class PivotSelection(Serialisable):
pane = Set(values=("bottomRight", "topRight", "bottomLeft", "topLeft"))
showHeader = Bool()
label = Bool()
data = Bool()
extendable = Bool()
count = Integer()
axis = String(allow_none=True)
dimension = Integer()
start = Integer()
min = Integer()
max = Integer()
activeRow = Integer()
activeCol = Integer()
previousRow = Integer()
previousCol = Integer()
click = Integer()
def __init__(self,
pane=None,
showHeader=None,
label=None,
data=None,
extendable=None,
count=None,
axis=None,
dimension=None,
start=None,
min=None,
max=None,
activeRow=None,
activeCol=None,
previousRow=None,
previousCol=None,
click=None):
self.pane = pane
self.showHeader = showHeader
self.label = label
self.data = data
self.extendable = extendable
self.count = count
self.axis = axis
self.dimension = dimension
self.start = start
self.min = min
self.max = max
self.activeRow = activeRow
self.activeCol = activeCol
self.previousRow = previousRow
self.previousCol = previousCol
self.click = click
class PivotArea(Serialisable):
field = Integer(allow_none=True)
type = NoneSet(values=("normal", "data", "all", "origin", "button", "topEnd"))
dataOnly = Bool()
labelOnly = Bool()
grandRow = Bool()
grandCol = Bool()
cacheIndex = Bool()
outline = Bool()
offset = String()
collapsedLevelsAreSubtotals = Bool()
axis = String(allow_none=True)
fieldPosition = Integer(allow_none=True)
def __init__(self,
field=None,
type=None,
dataOnly=None,
labelOnly=None,
grandRow=None,
grandCol=None,
cacheIndex=None,
outline=None,
offset=None,
collapsedLevelsAreSubtotals=None,
axis=None,
fieldPosition=None):
self.field = field
self.type = type
self.dataOnly = dataOnly
self.labelOnly = labelOnly
self.grandRow = grandRow
self.grandCol = grandCol
self.cacheIndex = cacheIndex
self.outline = outline
self.offset = offset
self.collapsedLevelsAreSubtotals = collapsedLevelsAreSubtotals
self.axis = axis
self.fieldPosition = fieldPosition
class PivotAreaReferences(Serialisable):
count = Integer()
def __init__(self, count=None):
self.count = count
class PivotAreaReference(Serialisable):
field = Integer(allow_none=True)
count = Integer()
selected = Bool()
byPosition = Bool()
relative = Bool()
defaultSubtotal = Bool()
sumSubtotal = Bool()
countASubtotal = Bool()
avgSubtotal = Bool()
maxSubtotal = Bool()
minSubtotal = Bool()
productSubtotal = Bool()
countSubtotal = Bool()
stdDevSubtotal = Bool()
stdDevPSubtotal = Bool()
varSubtotal = Bool()
varPSubtotal = Bool()
def __init__(self,
field=None,
count=None,
selected=None,
byPosition=None,
relative=None,
defaultSubtotal=None,
sumSubtotal=None,
countASubtotal=None,
avgSubtotal=None,
maxSubtotal=None,
minSubtotal=None,
productSubtotal=None,
countSubtotal=None,
stdDevSubtotal=None,
stdDevPSubtotal=None,
varSubtotal=None,
varPSubtotal=None):
self.field = field
self.count = count
self.selected = selected
self.byPosition = byPosition
self.relative = relative
self.defaultSubtotal = defaultSubtotal
self.sumSubtotal = sumSubtotal
self.countASubtotal = countASubtotal
self.avgSubtotal = avgSubtotal
self.maxSubtotal = maxSubtotal
self.minSubtotal = minSubtotal
self.productSubtotal = productSubtotal
self.countSubtotal = countSubtotal
self.stdDevSubtotal = stdDevSubtotal
self.stdDevPSubtotal = stdDevPSubtotal
self.varSubtotal = varSubtotal
self.varPSubtotal = varPSubtotal
class Index(Serialisable):
v = Integer()
def __init__(self, v=None):
self.v = v
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
df68a0ed14ee3d9143270beb9c86e524ff1c717e
|
907d9d6ab8a2bb296a704e9338badde755e7d9f3
|
/testSpn.py
|
1e0679dff3917a51a722731b42278161d6d16da2
|
[] |
no_license
|
nisargap/spn-crypto
|
ddd6082629677fd21879e9feecac51b7bef73363
|
f072264123c0058b5e89356689e11b37723b9d8f
|
refs/heads/master
| 2021-01-10T05:43:45.970971
| 2015-10-05T19:31:30
| 2015-10-05T19:31:30
| 43,708,513
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
' Author: Nisarga Patel
' Document: testSpn.py
' Description: This is a test file for the Substitution Permutation
' Network module created to encrypt binary plaintext strings. The
' exact key for this test is defined at the top of the SPNencrypt
' module. This test file was created in order to look for cipher
' text attacks in this specific Substitution Permutation Network.
'
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import SPNencrypt
def main():
plain = "0010011010110111"
SPNencrypt.spn(plain)
plain = "1111111111111111"
SPNencrypt.spn(plain)
plain = "0000000000000000"
SPNencrypt.spn(plain)
plain = "1111111111111110"
SPNencrypt.spn(plain)
plain = "1110111111011110"
SPNencrypt.spn(plain)
plain = "1110101011011110"
SPNencrypt.spn(plain)
plain = "1110101000011110"
SPNencrypt.spn(plain)
plain = "0101010101010101"
SPNencrypt.spn(plain)
plain = "0101010101010101"
SPNencrypt.spn(plain)
plain = "0101000000000101"
SPNencrypt.spn(plain)
plain = "0101000010100101"
SPNencrypt.spn(plain)
plain = "1110111111110101"
SPNencrypt.spn(plain)
plain = "0000000100001000"
SPNencrypt.spn(plain)
plain = "0001000100001001"
SPNencrypt.spn(plain)
plain = "0110111001110111"
SPNencrypt.spn(plain)
plain = "1111011111011101"
SPNencrypt.spn(plain)
main()
|
[
"patelnisarga1@gmail.com"
] |
patelnisarga1@gmail.com
|
b17874da1534c1635ec7a910c3ce1d32eda7ff50
|
5252efd0922ea5be93dfc63db6de282184505346
|
/ds/tests/test_linked_list.py
|
72dc77493dcc768bc3624b55b39b9e4ab6554da2
|
[] |
no_license
|
faddy/ds-with-python
|
157b35a5f22107f6dfba7604ed3ca87d33df6c5e
|
6fba0eeb4552fa03fcbfb2f84ce747a2dc2c3e79
|
refs/heads/master
| 2016-09-11T05:02:18.879067
| 2013-08-18T21:47:46
| 2013-08-18T21:47:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,453
|
py
|
import unittest
from data_structures.linked_lists import Node
from data_structures.linked_lists import UnorderedList
class TestNode(unittest.TestCase):
def setUp(self):
self.input_list = [0, -4, 10, None, 'one']
def test_object_creation(self):
node_list = [Node(item)for item in self.input_list]
for node in node_list:
self.assertIsNotNone(node)
def test_data_and_next(self):
node_list = [Node(item)for item in self.input_list]
for node, inp in zip(node_list, self.input_list):
self.assertEqual(node.get_data(), inp)
self.assertIsNone(node.get_next())
def test_setting_next(self):
node = Node([])
node.set_next(Node(3))
self.assertEqual(node.get_next().get_data(), 3)
class TestLinkedListCreation(unittest.TestCase):
def test_list_creation(self):
llist = UnorderedList()
self.assertIsNotNone(llist)
class TestLinkedList(unittest.TestCase):
def setUp(self):
self.llist = UnorderedList()
self.input_list = [0, -4, 10, None, 'one']
def populate_list(self):
for item in self.input_list:
self.llist.append(item)
def node_spitter(self):
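# generator: yield every node from head to tail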
node = self.llist.head
while node:
yield node
node = node.get_next()
def test_list_head(self):
self.assertIsNone(self.llist.head)
self.assertTrue(self.llist.is_empty())
def test_add_and_length(self):
self.populate_list()
self.assertEqual(self.llist.length(), len(self.input_list))
for i, j in zip(self.node_spitter(), self.input_list):
self.assertEqual(i.get_data(), j)
def test_index(self):
self.populate_list()
self.assertEqual(self.llist.index(0), 0)
self.assertEqual(self.llist.index(-4), 1)
self.assertEqual(self.llist.index(None), 3)
self.assertEqual(self.llist.index('one'), 4)
def test_return_last_node(self):
self.assertIsNone(self.llist._return_last_node())
self.populate_list()
self.assertEqual(self.llist._return_last_node().get_data(), self.input_list[-1])
def test_insert(self):
self.populate_list()
self.assertRaises(ValueError, self.llist.insert, -1, 5)
self.assertRaises(ValueError, self.llist.insert, len(self.input_list)+2, 5)
self.llist.insert(0, 'zeroth')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(self.llist.head.get_data(), 'zeroth')
self.assertEqual(result, ['zeroth', 0, -4, 10, None, 'one'])
self.llist.insert(1, 'first')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(result, ['zeroth', 'first', 0, -4, 10, None, 'one'])
self.llist.insert(6, 'sixth')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(result, ['zeroth', 'first', 0, -4, 10, None, 'sixth', 'one'])
self.llist.insert(8, 'last')
result = [n.get_data() for n in self.llist.get_node_generator()]
self.assertEqual(result, ['zeroth', 'first', 0, -4, 10, None, 'sixth', 'one', 'last'])
def test_pop(self):
self.assertRaises(Exception, self.llist.pop)
self.populate_list()
result = []
while not self.llist.is_empty():
item = self.llist.pop()
result.append(item)
self.assertEqual(result, list(reversed(self.input_list)))
def test_search(self):
self.populate_list()
self.assertTrue(self.llist.search(10))
self.assertTrue(self.llist.search(None))
self.assertFalse(self.llist.search(123))
def test_remove(self):
self.populate_list()
self.llist.remove(10)
result = [n.get_data() for n in self.llist.get_node_generator()]
self.input_list.remove(10)
self.assertEqual(result, self.input_list)
self.llist.remove(None)
result = [n.get_data() for n in self.llist.get_node_generator()]
self.input_list.remove(None)
self.assertEqual(result, self.input_list)
self.llist.remove(0)
result = [x.get_data() for x in self.llist.get_node_generator()]
self.input_list.remove(0)
self.assertEqual(result, self.input_list)
if __name__ == '__main__':
unittest.main()
|
[
"fahadghanidgp@gmail.com"
] |
fahadghanidgp@gmail.com
|
d716a64d25d8ed53904876bd54c1a98a7b88deb5
|
9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d
|
/python/uline/uline/uline/handlers/app/distributor/balance/distributorBalanceList.py
|
4116d637e99da40fb08daa5c8fdc82a1bdbb023b
|
[] |
no_license
|
apollowesley/Demo
|
f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8
|
471c4af95d3a7222d6933afc571a8e52e8fe4aee
|
refs/heads/master
| 2021-02-15T04:01:51.590697
| 2018-01-29T01:44:29
| 2018-01-29T01:44:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import tornado.web
import tornado.gen
from uline.handlers.baseHandlers import DtAdminHandler
from .form import DistributorBalanceList
from uline.public.constants import TO_PAY, PAY_CHANNEL
from datetime import timedelta, datetime
from uline.public.permit import check_permission
class DistributorBalanceListHandler(DtAdminHandler):
@tornado.web.authenticated
@check_permission
def prepare(self):
form = DistributorBalanceList(self)
if not form.validate():
self.redirect('/dist/balance/dt/list')
return
self.dt_daily_balance_no = form.ddb_no.data
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
data = self.do_execute()
self.render('distributor/balance/distributorBalanceList.html', data=data)
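# fetch this distributor's daily balance row and normalise amounts/labels for display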
def do_execute(self):
dt_id = self.current_user
query = """select
to_char(ddbi.pay_start_time, 'YYYY-MM-DD HH24:MI:SS'),
to_char(ddbi.need_pay_time,'YYYY-MM-DD'),
ddbi.rcvAcctName,
ddbi.channel,
ddbi.rcvacctno,
ddbi.rcvBankName,
ddbi.tranAmt,
ddbi.pay_status,
ddbi.failure_details
from dt_daily_balance_info as ddbi
inner join dt_balance db on db.dt_id = ddbi.dt_id
where ddbi.dt_id=%(dt_id)s
and ddbi.dt_daily_balance_no=%(dt_daily_balance_no)s;"""
ret = self.db.selectSQL(query, {'dt_daily_balance_no': self.dt_daily_balance_no, 'dt_id': dt_id})
fields = ['create_at', 'need_pay_time', 'rcvAcctName', 'channel', 'balance_account', 'rcvBankName',
'tranAmt', 'pay_status', 'failure_details']
dt_info = dict(zip(fields, ret))
dt_info['tranAmt'], dt_info['pay_status'], dt_info['channel'] = dt_info['tranAmt'] / 100, \
TO_PAY[str(dt_info['pay_status'])], \
PAY_CHANNEL[str(dt_info['channel'])],
dt_info['need_pay_time'] = datetime.strptime(dt_info['need_pay_time'], '%Y-%m-%d') - timedelta(days=1)
dt_info['need_pay_time'] = datetime.strftime(dt_info['need_pay_time'], '%Y-%m-%d')
# TODO: a table with transfer-status details is still missing
return dt_info
|
[
"36821277@qq.com"
] |
36821277@qq.com
|
2e93f6d19a1051930f841ffd22fefbc664870f58
|
3c2dd7932308cf47ca9910d963affa5a67beb97f
|
/model/callback.py
|
c2177b884353fae0e4f5603bcba99584c3450724
|
[] |
no_license
|
HaroldLiuJ/CGSum
|
cb65dc65a7c300f32412bf7f3fbfea4c07d2680c
|
ef372b0b126553d531fedb53f0a1a72b36a82b63
|
refs/heads/main
| 2023-06-06T10:17:54.917198
| 2021-06-25T14:09:23
| 2021-06-25T14:09:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,289
|
py
|
import os
import sys
import time
import numpy as np
import torch
from data_util.logging import logger
from data_util.utils import calc_running_avg_loss
from fastNLP.core.callback import Callback, EarlyStopError
class TrainCallback(Callback):
def __init__(self, config, patience=10, quit_all=True):
super().__init__()
self.config = config
self.patience = patience
self.wait = 0
self.running_avg_loss = 0
self.loss_update_every = []
if type(quit_all) != bool:
raise ValueError("In KeyBoardInterrupt, quit_all arguemnt must be a bool.")
self.quit_all = quit_all
def on_epoch_begin(self):
self.epoch_start_time = time.time()
if self.epoch == self.config.coverage_at:
self.config.is_coverage = True
if self.config.is_coverage:
self.trainer.do_valid = True
else:
self.trainer.do_valid = False
def on_backward_begin(self, loss):
self.loss_update_every.append(loss.item())
if isinstance(loss, tuple) and not np.isfinite(loss[0].item()):
logger.error("train Loss is not finite. Stopping.")
logger.info(loss[0].item())
for name, param in self.model.named_parameters():
if param.requires_grad:
logger.info(name)
logger.info(param.grad.data.sum())
raise Exception("train Loss is not finite. Stopping.")
if self.step % self.update_every == 0:
assert len(self.loss_update_every) == self.update_every
loss_batch = sum(self.loss_update_every)
self.loss_update_every = []
# report the loss
if self.step < 10 or self.step % 1000 == 0:
logger.info("|epoch: %d step: %d log_loss: %.4f |"
% (self.epoch, self.step / self.update_every, loss_batch))
self.running_avg_loss = calc_running_avg_loss(loss_batch, self.running_avg_loss,
self.step / self.update_every)
def on_backward_end(self):
if self.config.max_grad_norm:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
def on_epoch_end(self):
logger.info(
' | end of epoch {:3d} | time: {:5.2f}s | '.format(self.epoch, (time.time() - self.epoch_start_time)))
def on_valid_begin(self):
self.valid_start_time = time.time()
def on_valid_end(self, eval_result, metric_key, optimizer, is_better_eval):
logger.info(
' | end of valid {:3d} | time: {:5.2f}s | '.format(self.epoch, (time.time() - self.valid_start_time)))
# save the better checkpoint
if is_better_eval:
logger.info("got better results on dev, save checkpoint.. ")
model_save_path = os.path.join(self.config.model_path,
f'CGSum_{self.config.setting}_{self.config.n_hop}hopNbrs.pt')
checkpoint = {"state_dict": self.model.state_dict(), "config": self.model.config.__dict__}
torch.save(checkpoint, model_save_path)
# early stop
if not is_better_eval:
if self.wait == self.patience:
raise EarlyStopError("Early stopping raised.")
else:
self.wait += 1
else:
self.wait = 0
def on_exception(self, exception):
if isinstance(exception, KeyboardInterrupt):
if self.quit_all is True:
sys.exit(0)
else:
pass
else:
raise exception
class LRDecayCallback(Callback):
def __init__(self, parameters, decay_rate=1e-3, steps=100):
super().__init__()
self.paras = parameters
self.decay_rate = decay_rate
self.steps = steps
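# multiply each param group's lr by (1 - decay_rate) once every `steps` effective updates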
def on_step_end(self):
if self.step % self.update_every == 0:
step = self.step // self.update_every
if step % self.steps == 0:
for para in self.paras:
para['lr'] = para['lr'] * (1 - self.decay_rate)
|
[
"cxan@ChenxindeMacBook-Pro.local"
] |
cxan@ChenxindeMacBook-Pro.local
|
4c10f5dbe66a1ecd6b2cb0e0d1cb6a3481ac2ca0
|
1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2
|
/old_py2/controllers/apiai_controller.py
|
dfff3930d0c210a7d0d4eb8c2af95d15d9d7e374
|
[
"MIT"
] |
permissive
|
the-blue-alliance/the-blue-alliance
|
3dc210a9611ce9b240907ffd420f78040318dcdc
|
6d42f3cdb2f785d192f2871419e58aaae3445029
|
refs/heads/py3
| 2023-08-22T21:02:36.398100
| 2023-08-22T19:14:01
| 2023-08-22T19:14:01
| 888,427
| 344
| 263
|
MIT
| 2023-09-14T18:35:20
| 2010-09-04T20:34:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
import json
from base_controller import LoggedInHandler
from helpers.apiai_helper import APIAIHelper
from models.sitevar import Sitevar
class APIAIHandler(LoggedInHandler):
def __init__(self, *args, **kw):
super(APIAIHandler, self).__init__(*args, **kw)
def post(self):
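# reject webhook calls that lack the shared API.AI secret header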
if self.request.headers.get('X-TBA-APIAI-Auth') != Sitevar.get_by_id('apiai.secrets').contents['key']:
return
request = json.loads(self.request.body)
self.response.headers['content-type'] = 'application/json; charset="utf-8"'
self.response.out.write(json.dumps(APIAIHelper.process_request(request)))
|
[
"noreply@github.com"
] |
noreply@github.com
|
c7323c619a500829099442ad1c4249689bb1dc1e
|
ca292e954d548c62f0c4604dc46cb9faac914a2f
|
/max_of_two.py
|
b87410a3cb5d90d5d170799f4f46230a547ce7f4
|
[] |
no_license
|
rawgni/empireofcode
|
e73ee032266e85bb062ad03e34f8c37bf69072c3
|
8f00029ddc38759c77ac6308fe65ae07c44960fc
|
refs/heads/master
| 2021-06-08T22:50:44.828587
| 2016-11-29T13:59:00
| 2016-11-29T13:59:00
| 71,658,210
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
def my_max(a, b):
return max(a,b)
|
[
"ingwar.wirjawan@gmail.com"
] |
ingwar.wirjawan@gmail.com
|
8b37209b33d201b789d2658845aa87843ef7a8e0
|
db144fdc9a1948cce066bed20912c32e1a18a8aa
|
/accounts/views.py
|
49c0aa25bf7c13a2faa3ed61bf4acc3c6a75f458
|
[] |
no_license
|
masato932/django-blog3
|
cd01101cbffdbaa33d2cb9bf696e5a5cdf8cd6fa
|
769068ba356cf8e0cc0bbde76e82e116e58b8bab
|
refs/heads/main
| 2023-05-13T20:14:43.706480
| 2021-06-05T14:03:13
| 2021-06-05T14:03:13
| 365,480,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
from django.shortcuts import render, redirect
from allauth.account import views
class LoginView(views.LoginView):
template_name = 'accounts/login.html'
class LogoutView(views.LogoutView):
template_name = 'accounts/logout.html'
def post(self, *args, **kwargs):
if self.request.user.is_authenticated:
self.logout()
return redirect('/')
class SignupView(views.SignupView):
template_name = 'accounts/signup.html'
# Create your views here.
|
[
"masatowada66@gmail.com"
] |
masatowada66@gmail.com
|
69f95154a6b1698708090146d7eafde38aea3f17
|
955781aa0539cb4b61c189ad86d61f332c60f307
|
/project/rmethods_rec_train.py
|
bfe1b663e861095bfec55ea328a660409faea4a1
|
[
"Apache-2.0"
] |
permissive
|
LARC-CMU-SMU/coleridge-rich-context-larc
|
26039a436555fb0ab0d38f4af442f389f32da5ca
|
a0b6cba59b843bbaf98cdcb5e661b1c524a79db0
|
refs/heads/master
| 2020-04-08T18:45:30.960621
| 2019-06-15T10:02:11
| 2019-06-15T10:02:11
| 159,623,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,811
|
py
|
import argparse
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.utils import shuffle
from sklearn.externals import joblib
from rcc_conf import RAND_SEED
from rcc_utils import json_from_file
class RMethodContextExtractor(BaseEstimator, TransformerMixin):
"""Extract title and contents features.
"""
def fit(self, x, y=None):
return self
def transform(self, data):
# construct object dtype array with two columns
# first column = 'title' and second column = 'contents'
features = np.empty(shape=(len(data), 2), dtype=object)
for i, d in enumerate(data):
features[i, 0] = d['title']
features[i, 1] = d['contents']
return features
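# pipeline: per-column TF-IDF (title bigrams, contents unigrams) feeding a logistic-loss SGD classifier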
def _generate_train_model(data_train, labels_train,
output_file):
print('Training research method model for recommendation...')
features = ColumnTransformer(
[
('title', TfidfVectorizer(ngram_range=(1, 2)), 0),
('contents', TfidfVectorizer(ngram_range=(1, 1)), 1),
],
transformer_weights={
'title': 1.0,
'contents': 1.0
}
)
sgd = SGDClassifier(loss='log', penalty='l2',
alpha=1e-4,
random_state=RAND_SEED,
max_iter=2000, tol=1e-3)
pipeline = Pipeline([
('feature_set_extractor', RMethodContextExtractor()),
('union', features),
('clf', sgd)])
pipeline.fit(data_train, labels_train)
joblib.dump(pipeline, output_file, compress='zlib')
print('Model file {} saved.'.format(output_file))
def main(args):
rmethod_ctx_train = json_from_file(args.input)
rmethod_ctx_train = [d for d in rmethod_ctx_train
if d['title'] is not None]
labels_train = [d['method'] for d in rmethod_ctx_train]
rmethod_ctx_train, labels_train = shuffle(rmethod_ctx_train, labels_train,
random_state=RAND_SEED)
_generate_train_model(rmethod_ctx_train, labels_train, args.output)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train classifier model for \
research method recommendation.')
parser.add_argument('--input', type=str, required=True,
help='Filename of input dataset to train the models.')
parser.add_argument('--output', type=str, required=True,
help='Filename of model output')
args = parser.parse_args()
main(args)
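A hedged sketch of training and then reusing the saved pipeline. The file names are illustrative, and unpickling requires RMethodContextExtractor to be importable under the same module path used at training time:

#   python rmethods_rec_train.py --input rmethod_ctx_train.json --output rmethod_model.pkl
import joblib

pipeline = joblib.load('rmethod_model.pkl')  # hypothetical output file from the run above
docs = [{'title': 'A study of survey methods', 'contents': 'Full text of the paper ...'}]
probs = pipeline.predict_proba(docs)[0]      # loss='log' makes class probabilities available
top5 = probs.argsort()[::-1][:5]             # indices of the five most likely methods
print([(pipeline.classes_[i], probs[i]) for i in top5])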
|
[
"philipskokoh@gmail.com"
] |
philipskokoh@gmail.com
|
6714b04c2f025639a4022ead1621dce971eb5047
|
44721e45f5d2c8fcfd37e11cf246b9b25939976a
|
/images_fetcher/main.py
|
b3480aefa50d013c311532d381614d75c00e407d
|
[] |
no_license
|
pehovorka/street-object-detection
|
5e86c9fa2c2721875ddbed05b2f54e2df0445f3c
|
7501ebaf44596689a05c99bddbbe4494a1853c8f
|
refs/heads/master
| 2023-06-08T05:46:23.978260
| 2021-06-20T21:11:32
| 2021-06-20T21:11:32
| 378,380,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
import urllib.request
import urllib.parse
import json
import base64
import os
from datetime import datetime
from zoneinfo import ZoneInfo
from google.cloud import storage
def get_images_from_all_cameras():
cameras_list_url = "https://bezpecnost.praha.eu/Intens.CrisisPortalInfrastructureApp/cameras?format=json"
req = urllib.request.Request(cameras_list_url, data=None, headers={
"User-Agent": "Chrome"})
res = urllib.request.urlopen(req)
cameras = json.load(res)["cameras"]
for camera in cameras:
camera_url = f"https://bezpecnost.praha.eu/Intens.CrisisPortalInfrastructureApp/cameras/{camera['id']}/image?"
download_decode_save_image(camera_url, camera['name'], "images")
def download_decode_save_image(url: str, name: str = "image", path: str = "."):
req = urllib.request.Request(url, data=None, headers={
"User-Agent": "Chrome"})
res = urllib.request.urlopen(req)
base64image = json.load(res)['contentBase64']
filename = f"{path}/{name}.jpg"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "wb") as fh:
fh.write(base64.b64decode(base64image))
def download_decode_save_to_bucket_image(url: str, name: str = "image", bucket: str = "theatre_dataset_images"):
storage_client = storage.Client(project="ml-semestralka")
bucket = storage_client.get_bucket(bucket)
blob = bucket.blob(f'{name}.jpg')
req = urllib.request.Request(url, data=None, headers={
"User-Agent": "Chrome"})
res = urllib.request.urlopen(req)
base64image = json.load(res)['contentBase64']
file = base64.b64decode(base64image)
temp_location = "/tmp/image.jpg"
os.makedirs(os.path.dirname(temp_location), exist_ok=True)
with open(temp_location, "wb") as fh:
fh.write(file)
with open(temp_location, 'rb') as jpg:
        blob.upload_from_file(jpg, rewind=True, content_type="image/jpeg")
def download_national_theatre_image(data=None, context=None):
url = "https://bezpecnost.praha.eu/Intens.CrisisPortalInfrastructureApp/cameras/500046/image"
current_time = datetime.now(ZoneInfo("Europe/Prague"))
download_decode_save_to_bucket_image(
url, current_time.strftime("%Y-%m-%d %H:%M:%S"))
return "Success!"
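A minimal local usage sketch. Running the bucket upload assumes Google Cloud credentials are configured (e.g. via GOOGLE_APPLICATION_CREDENTIALS); that is an assumption about the deployment, not stated in the source:

if __name__ == "__main__":
    get_images_from_all_cameras()       # saves one JPEG per camera under ./images/
    download_national_theatre_image()   # pushes a timestamped frame to the GCS bucket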
|
[
"pehovorka@gmail.com"
] |
pehovorka@gmail.com
|
b62893ee1712e3ddf4365071e6596e2d820ac5dc
|
cf57cd3355471f035ca429302742b4eb4baf1214
|
/Comparações/SHI-TOMASI/SHI-TOMASI_sift.py
|
7a91ba9cb2b4ae56f47b6d8069c64cbee54c797b
|
[] |
no_license
|
RobotColony-UEFS/feature-match
|
c56d78230d86948e5612a9645c71a0647eb94604
|
ac421989aa1ee3893243122a0cf041b30e038a28
|
refs/heads/master
| 2022-11-27T15:31:20.570505
| 2020-08-04T19:24:17
| 2020-08-04T19:24:17
| 285,063,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,365
|
py
|
#coding: utf-8
import cv2
import numpy as np
import mysql.connector
import math
mydb = mysql.connector.connect(
host="localhost",
user="descritores",
passwd="12345678",
database="referencias"
)
def desvio(vetResult):
    # Population standard deviation (and mean) of the values
    soma = float(sum(vetResult))
    media = soma/len(vetResult)
    res = 0
    for valor in vetResult:
        res += ((valor - media)**2)
    desvio = (math.sqrt(res/len(vetResult)))
    return (media, desvio)
vet_matches = []
vet_corretos = []
img11 = cv2.imread("../../imgReferencia/img00.jpg", 0)
altura = img11.shape[0]
largura = img11.shape[1]
img1 = cv2.resize(img11, (int(largura*0.4), int(altura*0.4)))
corners11 = cv2.goodFeaturesToTrack(img1, 100, 0.01, 10)
corners1 = np.int0(corners11)
kp1 = cv2.KeyPoint_convert(corners1)
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
kp1, des1 = sift.compute(img1, kp1)
quantidadeImagens = 1
while(quantidadeImagens<=13):
acertos = 0
img22 = cv2.imread("../../imgTeste/img"+str(quantidadeImagens)+".jpg", 0)
altura2 = img22.shape[0]
largura2 = img22.shape[1]
img2 = cv2.resize(img22, (int(largura2*0.4), int(altura2*0.4)))
corners22 = cv2.goodFeaturesToTrack(img2, 100, 0.01, 10)
corners2 = np.int0(corners22)
kp2 = cv2.KeyPoint_convert(corners2)
kp2, des2 = sift.compute(img2, kp2)
mat = bf.match(des1,des2)
mat = sorted(mat, key = lambda x:x.distance)
matches = mat[0:150]
with open("../../imgTeste/img"+str(quantidadeImagens)+".txt",'r') as f:
texto=f.readlines()
posicao_x= np.float_(texto[0:4])
posicao_y = np.float_(texto[4:8])
min_x = float(min(posicao_x))
max_x = float(max(posicao_x))
min_y = float(min(posicao_y))
max_y = float(max(posicao_y))
if len(matches)>10:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
for pos in dst_pts:
if((pos[0][0]>(min_x) and pos[0][0]<(max_x)) and (pos[0][1]>(min_y) and pos[0][1]<(max_y))):
acertos+=1
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:],None,flags=2)
cv2.imwrite("../resultados/shiTomasi-sift/img"+str(quantidadeImagens)+".jpg", img3)
vet_matches.append(len(matches))
vet_corretos.append(acertos)
mycursor = mydb.cursor()
sql = "INSERT INTO shiTomasi_sift(Nome, Matches, Correto, ImgReferente) VALUES (%s, %s, %s, %s)"
valor = ("ShiTomasi-Sift"+str(quantidadeImagens), len(matches), acertos, "img"+str(quantidadeImagens)+".jpg")
mycursor.execute(sql, valor)
mydb.commit()
print(len(matches), acertos)
quantidadeImagens+=1
media_matches, desvio_matches = desvio(vet_matches)
media_corretos, desvio_corretos = desvio(vet_corretos)
porcentagem = (media_corretos/media_matches)*100
sql2 = "INSERT INTO medias_desvios(Nome, MediaMatches, DesvioMatches, MediaCorretos, DesvioCorretos, Porcentagem) VALUES (%s, %s, %s, %s, %s, %s)"
valor2 = ("shiTomasi_sift", media_matches, desvio_matches, media_corretos, desvio_corretos, porcentagem)
mycursor.execute(sql2, valor2)
mydb.commit()
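An illustrative check of the scoring rule used above: a matched point counts as correct when it lands inside the ground-truth box whose corners are read from the companion .txt file (four x values, then four y values). All numbers below are made up:

gt_x = [120.0, 120.0, 340.0, 340.0]  # hypothetical corner x coordinates
gt_y = [80.0, 260.0, 260.0, 80.0]    # hypothetical corner y coordinates
min_x, max_x = min(gt_x), max(gt_x)
min_y, max_y = min(gt_y), max(gt_y)

def acerto(x, y):
    # mirrors the inlier test applied to each dst_pts entry above
    return (min_x < x < max_x) and (min_y < y < max_y)

print(acerto(200.0, 150.0))  # True: inside the ground-truth box
print(acerto(10.0, 10.0))    # False: outside, so not counted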
|
[
"samuelreboucas07@hotmail.com"
] |
samuelreboucas07@hotmail.com
|
8c8a966e406d530b9ab30c7abb9645b76d1a5898
|
bf833d3048f1eabc3e47101412ac77d14a43f332
|
/src/augment_me.py
|
e546249219c040609b22161b7fa6182efc20e5ce
|
[] |
no_license
|
DavidDavidsonDK/Person-Detection
|
c6c177ab0ddd42320741796edc372e4a5f42d11a
|
718d16e8cd1f5b4e876951365c75ee88d752c308
|
refs/heads/master
| 2020-04-11T22:34:42.747205
| 2018-12-29T07:15:33
| 2018-12-29T07:15:33
| 162,140,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
import numpy as np
import cv2
from ano_parser import PascalVocWriter, PascalVocReader
from PIL import Image
import math
from tqdm import tqdm
from rotation import Rotation
from random_translation import Translate
from random_scale import Scale
from horizontal_flip import HorizontalFlip
import os
class Augmentation(object):
def __init__(self,rotation=True,translate=True,scale=True,horizontal_flip=True,**kwargs):
self.augmentators = {}
if rotation:
ag = kwargs['ag'] if 'ag' in kwargs else 30
self.rotation = Rotation(ag)
self.augmentators['rotation'] = self.rotation
if translate:
tr = kwargs['tr'] if 'tr' in kwargs else 0.2
self.translate = Translate(tr)
self.augmentators['translate'] = self.translate
if scale:
sc_lower = kwargs['sc_lower'] if 'sc_lower' in kwargs else 0.2
sc_upper = kwargs['sc_upper'] if 'sc_upper' in kwargs else 0.2
self.scale = Scale((sc_lower,sc_upper))
self.augmentators['scale'] = self.scale
if horizontal_flip:
self.horizontal_flip = HorizontalFlip()
self.augmentators['horizontal_flip'] = self.horizontal_flip
def __augment(self,im_path,im_name,ano_reader,dest_path):
im = np.array(Image.open(im_path+im_name+'.jpg'), dtype=np.uint8)
boxes = np.array([[shape[1][0][0],shape[1][0][1], shape[1][1][0],shape[1][1][1]] for shape in ano_reader.getShapes()])
d_flags = [int(shape[4]) for shape in ano_reader.getShapes()]
i = 0
for key,transformer in self.augmentators.items():
name_like = dest_path + im_name + '_'+key
ano_writer = PascalVocWriter('augmented', im_name + '_'+key+'.jpg', im.shape,localImgPath=name_like+'.jpg')
a_im,a_boxes = transformer(im,boxes.astype(np.float64))
for box in a_boxes:
                ano_writer.addBndBox(math.ceil(box[0]), math.ceil(box[1]), math.ceil(box[2]), math.ceil(box[3]), 'person', d_flags[i])  # last argument is the per-box 'difficult' flag
i+=1
ano_writer.save(name_like+'.xml')
cv2.imwrite(name_like+'.jpg',a_im)
i = 0
def augment(self, file_path='../data/raw/citycams_1/', dest_path='../data/augmented/'):
for img_or_xml in tqdm(os.listdir(file_path)):
if img_or_xml.endswith('.jpg'):
main_part_of_name = img_or_xml.split('.')[0]
ano_reader = PascalVocReader(file_path+main_part_of_name+'.xml')
self.__augment(file_path,main_part_of_name,ano_reader,dest_path)
if __name__ == '__main__':
    print('Augmentation start...')
aug = Augmentation(rotation=True, translate=True, scale=True, horizontal_flip=True,ag = 20,tr=0.2,sc_lower=0.2,sc_upper=0.6)
aug.augment()
    print('Augmentation finished')
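A hedged sketch of the callable contract each augmenter satisfies, inferred from how __augment invokes them (transformer(im, boxes)); the dummy image and box below are made up:

import numpy as np
from rotation import Rotation

im = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy image
boxes = np.array([[100., 120., 200., 260.]])   # one [x1, y1, x2, y2] box

rotate = Rotation(20)                          # same construction Augmentation uses
a_im, a_boxes = rotate(im, boxes)              # returns the transformed image and boxes
print(a_im.shape, a_boxes)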
|
[
"d.kprogrammer0.0@gmail.com"
] |
d.kprogrammer0.0@gmail.com
|
f6b761abaee5b80a9b12e42bbd686bac6e18ac1e
|
50b01425d7542e5c37c1f6d43041337b8fca1aaf
|
/testMockDirac/StatusOperation.py
|
9525fe5f60a7bc20e0bd58f44c17c331aa1de97a
|
[] |
no_license
|
coberger/PythonTestCern
|
98f0d05447a9e3987f2d41463447b5ea5a97e874
|
c45bc6c3102dcf08d6675d3967d733028e860b67
|
refs/heads/master
| 2021-01-13T02:02:40.042771
| 2015-09-09T12:39:40
| 2015-09-09T12:39:40
| 33,874,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
class StatusOperation(object):
    def __init__(self, lfn, status):
        self.lfn = lfn
        self.status = status
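A one-line usage sketch; the LFN path and status value are hypothetical, chosen only to match DIRAC conventions:

op = StatusOperation('/lhcb/user/c/someuser/file.dst', 'Done')  # hypothetical values
print(op.lfn, op.status)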
|
[
"corent.berger@gmail.com"
] |
corent.berger@gmail.com
|
59c2e6f44d3b4e5943e33fae3055bca489248b1f
|
0d261e74c5cfcc7631cf17e5e5ea67ac146c9929
|
/1-Neural_Networks_and_Deep_Learning/vectorization.py
|
7a583c48d8b1449c4511c9e9c14c5de5001332b3
|
[] |
no_license
|
zoro16/deep-learning
|
61c4b2d85867fc9761badf1286a29dcc1360130c
|
4130b3591bb04cff5771e7cca0e122ab897afff0
|
refs/heads/master
| 2021-09-10T12:19:14.013642
| 2018-03-26T06:11:30
| 2018-03-26T06:11:30
| 113,404,656
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
import numpy as np
import time
a = np.random.rand(1000000)
b = np.random.rand(1000000)
started = time.time()
c = np.dot(a, b)
ended = time.time()
print("Vertorized version: {} ms".format(str(1000*(ended-started))))
c = 0
started = time.time()
for i in range(1000000):
c += a[i] * b[i]
ended = time.time()
print("For loop version: {} ms".format(str(1000*(ended-started))))
|
[
"mohamed.saleh16@gmail.com"
] |
mohamed.saleh16@gmail.com
|