blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a6b14ab0e5a78ae9f9b88c8085ed074fa598fa30
|
Python
|
sanjeevs/lottery
|
/lottery.py
|
UTF-8
| 1,601
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env python
import random
def is_jackpot(winner, my_card):
    """Return True when every digit of *winner* also appears in *my_card*.

    An empty *winner* is vacuously a jackpot, matching the original loop.
    """
    return all(digit in my_card for digit in winner)
def is_deuce(winner, my_card):
    """Return True when exactly two digits of *winner* appear in *my_card*.

    Each position of *winner* is tested independently, so a winner with
    repeated digits counts each occurrence (same as the original loop).
    """
    num_matches = sum(1 for digit in winner if digit in my_card)
    return num_matches == 2
def get_ticket():
    """Build a random 3-digit ticket whose digits are unique, drawn from 1-7."""
    pool = ["1", "2", "3", "4", "5", "6", "7"]
    random.shuffle(pool)
    # Take the first three shuffled digits; uniqueness is guaranteed by
    # construction and double-checked by the validator below.
    ticket = "".join(pool[:3])
    assert(is_ticket_valid(ticket))
    return ticket
def is_ticket_valid(ticket):
    """A ticket is valid if all of its digits are unique.

    Replaces the original bitmask check with a set comparison; this is
    equivalent for digit strings and also generalizes safely to any
    characters (the bitmask raised ValueError for chars below '0').
    """
    return len(set(ticket)) == len(ticket)
def get_book_of_tickets():
    """Return a book: a list of seven randomly generated tickets."""
    return [get_ticket() for _ in range(7)]
def is_jackpot_in_book(book, winning_ticket):
    """Return True when any ticket in *book* is a jackpot against *winning_ticket*."""
    return any(is_jackpot(winning_ticket, ticket) for ticket in book)
def is_min_double_deuce_in_book(book, winning_ticket):
    """Return True when at least two tickets in *book* are deuces
    (exactly two matching digits) against *winning_ticket*."""
    return sum(1 for ticket in book if is_deuce(winning_ticket, ticket)) >= 2
def is_book_winner(book, winning_ticket):
    """A book wins with either one jackpot or at least two deuce tickets."""
    return (is_jackpot_in_book(book, winning_ticket) or
            is_min_double_deuce_in_book(book, winning_ticket))
def magic_book():
    """Return a fixed, hand-picked book of seven tickets.

    Each ticket uses three distinct digits from 1-7; presumably chosen so
    digit pairs are spread across the book -- TODO confirm the intent.
    """
    return ["123", "145", "167", "247", "256", "346", "357"]
| true
|
5f362e91efc60b610b23f7cf94022903dc39a709
|
Python
|
Booharin/lesson_1
|
/task_5.py
|
UTF-8
| 528
| 3.703125
| 4
|
[] |
no_license
|
# Interactive profitability calculator (lesson script; prompts/output in Russian).
revenue = int(input('Ваша выручка: '))  # prompt: "Your revenue"
costs = int(input('Вашы расходы: '))  # prompt: "Your costs"
profit = revenue - costs
if profit > 0:
    # Profit amount, then margin as a percentage of revenue.
    print(f"Ваша прибыль составила: {profit}")
    print(f"Рентабельность: {(profit / revenue) * 100:.3f}%")
    staff_number = int(input('Количество сотрудников: '))  # prompt: "Number of employees"
    # NOTE(review): staff_number == 0 would raise ZeroDivisionError — confirm inputs.
    print(f"Прибыль на одного сотрудника: {(profit / staff_number):.3f}")
else:
    # Message: "you earned nothing" (non-positive profit branch).
    print(f"Нихрена вы не заработали")
| true
|
257c6705a44b8fa4da1308645b9cafebfd772c9c
|
Python
|
jgeltch/dumb
|
/dumb.py
|
UTF-8
| 128
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
import time

# Time a long busy loop: print progress every 1,000,000 iterations,
# then report the total elapsed wall-clock seconds.
start = time.time()
for counter in range(0, 2 ** 32):
    if counter % 1000000 == 0:
        print(counter)
print(time.time() - start)
| true
|
3478ff1c404906da49fb5e98ced8ebc8b90eb7b0
|
Python
|
skydownacai/DGA-Domain-Detection
|
/DataStructure.py
|
UTF-8
| 472
| 2.515625
| 3
|
[] |
no_license
|
from typing import NamedTuple, List, Optional
import torch.tensor as Tensor
class Example(NamedTuple):
    """A single labeled domain-name sample (one dataset row)."""
    domain_name : str  # the domain name
    label : Optional[bool]  # whether the domain is a malicious address
    char_ids : List[int]  # id of each character in the vocab
    domain_len : int  # length of the domain name
class BatchInputFeature(NamedTuple):
    """A batch of examples collated into tensors for model input."""
    domain_names : List[str]  # domain names
    labels : Tensor  # whether each domain is a malicious address
    char_ids : Tensor  # id of each character in the vocab
    domain_lens : Tensor  # length of each domain name
| true
|
252cc798520d3c09df62b7296b38a610af268af0
|
Python
|
flyingGH/synthtext
|
/tools/filter_word.py
|
UTF-8
| 1,442
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import sys
import random
def get_alpha_word(fp):
    """Read whitespace-separated tokens from the file at *fp* and return
    the unique purely-alphabetic words as a list (iteration order of the
    underlying set; callers should not rely on ordering)."""
    raw_words = set()
    with open(fp, 'r') as fd:
        for line in fd:
            raw_words.update(line.split())
    # Keep only words made entirely of letters (drops numbers, punctuation).
    return [word for word in raw_words if word.isalpha()]
def shuffle_word_order(words):
    """Return a new list where each word's characters are randomly permuted."""
    def _scramble(word):
        # shuffle works in place, so go through a list of characters.
        chars = list(word)
        random.shuffle(chars)
        return ''.join(chars)

    return [_scramble(word) for word in words]
def gen_corpora(words, min_len, max_len, nlines):
    """Generate *nlines* text lines, each a space-joined random sample of
    between min_len and max_len - 1 distinct words from *words*.

    NOTE: reseeds the global RNG with 0, so output is deterministic and
    any prior random state is discarded.
    """
    random.seed(0)
    corpus = []
    for _ in range(nlines):
        count = min_len + int(random.random() * (max_len - min_len))
        corpus.append(' '.join(random.sample(words, count)))
    return corpus
def save2file(lines, fp):
    """Write *lines* to the file at *fp*, newline-separated
    (no trailing newline, same as the original)."""
    joined = '\n'.join(lines)
    with open(fp, 'w') as fd:
        fd.write(joined)
if __name__ == '__main__':
    # Corpus settings: words per line in [min_len, max_len) and line count.
    min_len = 5
    max_len = 13
    nlines = 10000
    fp = sys.argv[1]  # input word-list file path from the command line
    alpha_words = get_alpha_word(fp)
    #alpha_words_shuffle = shuffle_word_order(alpha_words)
    corpora = gen_corpora(alpha_words, min_len, max_len, nlines)
    save2file(corpora, 'coca_alpha_words.txt')
    #corpora = gen_corpora(alpha_words_shuffle, min_len, max_len, nlines)
    #save2file(corpora, 'alpha_words_shuffle')
#corpora = gen_corpora(alpha_words_shuffle, min_len, max_len, nlines)
#save2file(corpora, 'alpha_words_shuffle')
| true
|
50f8f7bbb898e8a54522bce3f72f314e991f34df
|
Python
|
xiemeigongzi88/PyTorch_learning
|
/Dive into Deep Learning PyTorch/code/3.8 多层感知机.py
|
UTF-8
| 623
| 3.0625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 31 20:11:48 2020
@author: sxw17
"""
# 3.8 Multilayer perceptron
# 3.8.1 Hidden layers
# 3.8.2 Activation functions
# 3.8.2.1 ReLU
# ReLU(x) = max(x, 0)
# BUG FIX: the line above was originally a bare statement
# `ReLu(x) = max(x, 0)`, which is a SyntaxError; it is a formula from the
# book and belongs in a comment.
import torch
import numpy as np
import matplotlib.pylab as plt
import sys
import d2lzh_pytorch as d2l

def xyplot(x_val, y_val, name):
    """Plot y_val against x_val with the d2l helpers; axes labeled 'x' and '<name>(x)'."""
    d2l.set_figsize(figsize=(5,2.5))
    d2l.plt.plot(x_val.detach().numpy(), y_val.detach().numpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name+"(x)")

# Plot ReLU over [-9, 9).
x = torch.arange(-9.0, 9.0, 0.1, requires_grad= True)
y = x.relu()
xyplot(x,y,'relu')
| true
|
173b07667b878347ba13a0d86d55b5b0b59e4627
|
Python
|
LukeCroteau/rsl-equip
|
/mainapp/Models/hero_data.py
|
UTF-8
| 621
| 2.6875
| 3
|
[] |
no_license
|
from sqlalchemy import Column, Integer, String
from mainapp.database import Base
class Hero(Base):
    ''' Base class for Hero data '''
    __tablename__ = 'heroes'

    # Identity columns.
    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, index=True)
    hero_type = Column(String)

    # Combat statistic columns.
    hp = Column(Integer)
    attack = Column(Integer)
    defense = Column(Integer)
    speed = Column(Integer)
    crit_rate = Column(Integer)
    crit_damage = Column(Integer)
    resistance = Column(Integer)
    accuracy = Column(Integer)

    def __repr__(self):
        # Identical output to the original str.format(...) call.
        return '<Hero {} - {}>'.format(self.id, self.name)
| true
|
2f93f957f883b48ee014fa8c784279333acad3e2
|
Python
|
Mr4x3/competition_mania
|
/lookup/static_lookups.py
|
UTF-8
| 31,368
| 2.53125
| 3
|
[] |
no_license
|
COUNTRY_CHOICES = [
('AF', 'Afghanistan'),
('AX', 'Aland Islands'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia'),
('BQ', 'Bonaire, Saint Eustatius and Saba '),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory'),
('VG', 'British Virgin Islands'),
('BN', 'Brunei'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CW', 'Curacao'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('CD', 'Democratic Republic of the Congo'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('TL', 'East Timor'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('CI', 'Ivory Coast'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Laos'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia'),
('MD', 'Moldova'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('KP', 'North Korea'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('CG', 'Republic of the Congo'),
('RE', 'Reunion'),
('RO', 'Romania'),
('RU', 'Russia'),
('RW', 'Rwanda'),
('BL', 'Saint Barthelemy'),
('SH', 'Saint Helena'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SX', 'Sint Maarten'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('KR', 'South Korea'),
('SS', 'South Sudan'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard and Jan Mayen'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syria'),
('TW', 'Taiwan'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania'),
('TH', 'Thailand'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('VI', 'U.S. Virgin Islands'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('GB', 'United Kingdom'),
('US', 'United States'),
('UM', 'United States Minor Outlying Islands'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VA', 'Vatican'),
('VE', 'Venezuela'),
('VN', 'Vietnam'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe')
]
COUNTRY_CODE_MAPPING = {
'AD': '376',
'AE': '971',
'AF': '93',
'AG': '1-268',
'AI': '1-264',
'AL': '355',
'AM': '374',
'AO': '244',
'AQ': '672',
'AR': '54',
'AS': '1-684',
'AT': '43',
'AU': '61',
'AW': '297',
'AX': '358-18',
'AZ': '994',
'BA': '387',
'BB': '1-246',
'BD': '880',
'BE': '32',
'BF': '226',
'BG': '359',
'BH': '973',
'BI': '257',
'BJ': '229',
'BL': '590',
'BM': '1-441',
'BN': '673',
'BO': '591',
'BQ': '599',
'BR': '55',
'BS': '1-242',
'BT': '975',
'BW': '267',
'BY': '375',
'BZ': '501',
'CA': '1',
'CC': '61',
'CD': '243',
'CF': '236',
'CG': '242',
'CH': '41',
'CI': '225',
'CK': '682',
'CL': '56',
'CM': '237',
'CN': '86',
'CO': '57',
'CR': '506',
'CU': '53',
'CV': '238',
'CW': '599',
'CX': '61',
'CY': '357',
'CZ': '420',
'DE': '49',
'DJ': '253',
'DK': '45',
'DM': '1-767',
'DO': '1-809',
'DZ': '213',
'EC': '593',
'EE': '372',
'EG': '20',
'EH': '212',
'ER': '291',
'ES': '34',
'ET': '251',
'FI': '358',
'FJ': '679',
'FK': '500',
'FM': '691',
'FO': '298',
'FR': '33',
'GA': '241',
'GB': '44',
'GD': '1-473',
'GE': '995',
'GF': '594',
'GG': '44-1481',
'GH': '233',
'GI': '350',
'GL': '299',
'GM': '220',
'GN': '224',
'GP': '590',
'GQ': '240',
'GR': '30',
'GT': '502',
'GU': '1-671',
'GW': '245',
'GY': '592',
'HK': '852',
'HN': '504',
'HR': '385',
'HT': '509',
'HU': '36',
'ID': '62',
'IE': '353',
'IL': '972',
'IM': '44-1624',
'IN': '91',
'IO': '246',
'IQ': '964',
'IR': '98',
'IS': '354',
'IT': '39',
'JE': '44-1534',
'JM': '1-876',
'JO': '962',
'JP': '81',
'KE': '254',
'KG': '996',
'KH': '855',
'KI': '686',
'KM': '269',
'KN': '1-869',
'KP': '850',
'KR': '82',
'KW': '965',
'KY': '1-345',
'KZ': '7',
'LA': '856',
'LB': '961',
'LC': '1-758',
'LI': '423',
'LK': '94',
'LR': '231',
'LS': '266',
'LT': '370',
'LU': '352',
'LV': '371',
'LY': '218',
'MA': '212',
'MC': '377',
'MD': '373',
'ME': '382',
'MF': '590',
'MG': '261',
'MH': '692',
'MK': '389',
'ML': '223',
'MM': '95',
'MN': '976',
'MO': '853',
'MP': '1-670',
'MQ': '596',
'MR': '222',
'MS': '1-664',
'MT': '356',
'MU': '230',
'MV': '960',
'MW': '265',
'MX': '52',
'MY': '60',
'MZ': '258',
'NA': '264',
'NC': '687',
'NE': '227',
'NF': '672',
'NG': '234',
'NI': '505',
'NL': '31',
'NO': '47',
'NP': '977',
'NR': '674',
'NU': '683',
'NZ': '64',
'OM': '968',
'PA': '507',
'PE': '51',
'PF': '689',
'PG': '675',
'PH': '63',
'PK': '92',
'PL': '48',
'PM': '508',
'PN': '870',
'PR': '1-787',
'PS': '970',
'PT': '351',
'PW': '680',
'PY': '595',
'QA': '974',
'RE': '262',
'RO': '40',
'RS': '381',
'RU': '7',
'RW': '250',
'SA': '966',
'SB': '677',
'SC': '248',
'SD': '249',
'SE': '46',
'SG': '65',
'SH': '290',
'SI': '386',
'SJ': '47',
'SK': '421',
'SL': '232',
'SM': '378',
'SN': '221',
'SO': '252',
'SR': '597',
'SS': '211',
'ST': '239',
'SV': '503',
'SX': '599',
'SY': '963',
'SZ': '268',
'TC': '1-649',
'TD': '235',
'TG': '228',
'TH': '66',
'TJ': '992',
'TK': '690',
'TL': '670',
'TM': '993',
'TN': '216',
'TO': '676',
'TR': '90',
'TT': '1-868',
'TV': '688',
'TW': '886',
'TZ': '255',
'UA': '380',
'UG': '256',
'UM': '1',
'US': '1',
'UY': '598',
'UZ': '998',
'VA': '379',
'VC': '1-784',
'VE': '58',
'VG': '1-284',
'VI': '1-340',
'VN': '84',
'VU': '678',
'WF': '681',
'WS': '685',
'YE': '967',
'YT': '262',
'ZA': '27',
'ZM': '260',
'ZW': '263'
}
# (code, name) choice pairs for Indian states and union territories.
# Codes start at 1001 and follow alphabetical order of the state name;
# they are the keys of STATE_TO_CITY_CHOICES below.
STATE_CHOICES = [
(1001, 'Andaman and Nicobar Island'),
(1002, 'Andhra Pradesh'),
(1003, 'Arunachal Pradesh'),
(1004, 'Assam'),
(1005, 'Bihar'),
(1006, 'Chandigarh'),
(1007, 'Chhattisgarh'),
(1008, 'Dadra and Nagar Haveli'),
(1009, 'Daman and Diu'),
(1010, 'Delhi'),
(1011, 'Goa'),
(1012, 'Gujarat'),
(1013, 'Haryana'),
(1014, 'Himachal Pradesh'),
(1015, 'Jammu and Kashmir'),
(1016, 'Jharkhand'),
(1017, 'Karnataka'),
(1018, 'Kerala'),
(1019, 'Lakshadweep'),
(1020, 'Madhya Pradesh'),
(1021, 'Maharashtra'),
(1022, 'Manipur'),
(1023, 'Meghalaya'),
(1024, 'Mizoram'),
(1025, 'Nagaland'),
(1026, 'Odisha'),
(1027, 'Puducherry'),
(1028, 'Punjab'),
(1029, 'Rajasthan'),
(1030, 'Sikkim'),
(1031, 'Tamil Nadu'),
(1032, 'Telangana'),
(1033, 'Tripura'),
(1034, 'Uttar Pradesh'),
(1035, 'Uttarakhand'),
(1036, 'West Bengal')
]
STATE_TO_CITY_CHOICES = {
1001: [(10001, 'Nicobar'),
(10002, 'North and Middle Andaman'),
(10003, 'South Andaman')],
1002: [(10004, 'Anantapur'),
(10005, 'Chittoor'),
(10006, 'Cuddapah'),
(10007, 'East Godavari'),
(10008, 'Guntur'),
(10009, 'Krishna'),
(10010, 'Kurnool'),
(10011, 'Nellore'),
(10012, 'Prakasam'),
(10013, 'Srikakulam'),
(10014, 'Visakhapatnam'),
(10015, 'Vizianagaram'),
(10016, 'West Godavari')],
1003: [(10017, 'Anjaw'),
(10018, 'Changlang'),
(10019, 'Dibang Valley'),
(10020, 'East Kameng'),
(10021, 'East Siang'),
(10022, 'Kurung Kumey'),
(10023, 'Lohit'),
(10024, 'Longding'),
(10025, 'Lower Dibang Valley'),
(10026, 'Lower Subansiri'),
(10027, 'Papum Pare'),
(10028, 'Tawang'),
(10029, 'Tirap'),
(10030, 'Upper Siang'),
(10031, 'Upper Subansiri'),
(10032, 'West Kameng'),
(10033, 'West Siang')],
1004: [(10034, 'Baksa'),
(10035, 'Barpeta'),
(10036, 'Bongaigaon'),
(10037, 'Cachar'),
(10038, 'Chirang'),
(10039, 'Darrang'),
(10040, 'Dhemaji'),
(10041, 'Dhubri'),
(10042, 'Dibrugarh'),
(10043, 'Dima Hasao'),
(10044, 'Goalpara'),
(10045, 'Golaghat'),
(10046, 'Hailakandi'),
(10047, 'Jorhat'),
(10049, 'Kamrup'),
(10048, 'Kamrup Metropolitan'),
(10050, 'Karbi Anglong'),
(10051, 'Karimganj'),
(10052, 'Kokrajhar'),
(10053, 'Lakhimpur'),
(10054, 'Morigaon'),
(10055, 'Nagaon'),
(10056, 'Nalbari'),
(10057, 'Sivasagar'),
(10058, 'Sonitpur'),
(10059, 'Tinsukia'),
(10060, 'Udalguri')],
1005: [(10061, 'Araria'),
(10062, 'Arwal'),
(10063, 'Aurangabad'),
(10064, 'Banka'),
(10065, 'Begusarai'),
(10066, 'Bhagalpur'),
(10067, 'Bhojpur'),
(10068, 'Buxar'),
(10069, 'Darbhanga'),
(10070, 'East Champaran (Motihari)'),
(10071, 'Gaya'),
(10072, 'Gopalganj'),
(10073, 'Jamui'),
(10074, 'Jehanabad'),
(10075, 'Kaimur (Bhabua)'),
(10076, 'Katihar'),
(10077, 'Khagaria'),
(10078, 'Kishanganj'),
(10079, 'Lakhisarai'),
(10080, 'Madhepura'),
(10081, 'Madhubani'),
(10082, 'Munger (Monghyr)'),
(10083, 'Muzaffarpur'),
(10084, 'Nalanda'),
(10085, 'Nawada'),
(10086, 'Patna'),
(10087, 'Purnia (Purnea)'),
(10088, 'Rohtas'),
(10089, 'Saharsa'),
(10090, 'Samastipur'),
(10091, 'Saran'),
(10092, 'Sheikhpura'),
(10093, 'Sheohar'),
(10094, 'Sitamarhi'),
(10095, 'Siwan'),
(10096, 'Supaul'),
(10097, 'Vaishali'),
(10098, 'West Champaran')],
1006: [(10099, 'Chandigarh')],
1007: [(10100, 'Balod'),
(10101, 'Baloda Bazar'),
(10102, 'Balrampur'),
(10103, 'Bastar'),
(10104, 'Bemetara'),
(10105, 'Bijapur'),
(10106, 'Bilaspur'),
(10107, 'Dantewada (South Bastar)'),
(10108, 'Dhamtari'),
(10109, 'Durg'),
(10110, 'Gariaband'),
(10111, 'Janjgir-Champa'),
(10112, 'Jashpur'),
(10113, 'Kabirdham (Kawardha)'),
(10114, 'Kanker (North Bastar)'),
(10115, 'Kondagaon'),
(10116, 'Korba'),
(10117, 'Korea (Koriya)'),
(10118, 'Mahasamund'),
(10119, 'Mungeli'),
(10120, 'Narayanpur'),
(10121, 'Raigarh'),
(10122, 'Raipur'),
(10123, 'Rajnandgaon'),
(10124, 'Sukma'),
(10125, 'Surajpur'),
(10126, 'Surguja')],
1008: [(10127, 'Dadra & Nagar Haveli')],
1009: [(10128, 'Daman'), (10129, 'Diu')],
1010: [(10130, 'Central Delhi'),
(10131, 'East Delhi'),
(10132, 'New Delhi'),
(10133, 'North Delhi'),
(10134, 'North East Delhi'),
(10135, 'North West Delhi'),
(10136, 'South Delhi'),
(10137, 'South West Delhi'),
(10138, 'West Delhi')],
1011: [(10139, 'North Goa'), (10140, 'South Goa')],
1012: [(10141, 'Ahmedabad'),
(10142, 'Amreli'),
(10143, 'Anand'),
(10144, 'Aravalli'),
(10145, 'Banaskantha (Palanpur)'),
(10146, 'Bharuch'),
(10147, 'Bhavnagar'),
(10148, 'Botad'),
(10149, 'Chhota Udepur'),
(10150, 'Dahod'),
(10151, 'Dangs (Ahwa)'),
(10152, 'Devbhoomi Dwarka'),
(10153, 'Gandhinagar'),
(10154, 'Gir Somnath'),
(10155, 'Jamnagar'),
(10156, 'Junagadh'),
(10157, 'Kachchh'),
(10158, 'Kheda (Nadiad)'),
(10159, 'Mahisagar'),
(10160, 'Mehsana'),
(10161, 'Morbi'),
(10162, 'Narmada (Rajpipla)'),
(10163, 'Navsari'),
(10164, 'Panchmahal (Godhra)'),
(10165, 'Patan'),
(10166, 'Porbandar'),
(10167, 'Rajkot'),
(10168, 'Sabarkantha (Himmatnagar)'),
(10169, 'Surat'),
(10170, 'Surendranagar'),
(10171, 'Tapi (Vyara)'),
(10172, 'Vadodara'),
(10173, 'Valsad')],
1013: [(10174, 'Ambala'),
(10175, 'Bhiwani'),
(10176, 'Faridabad'),
(10177, 'Fatehabad'),
(10178, 'Gurgaon'),
(10179, 'Hisar'),
(10180, 'Jhajjar'),
(10181, 'Jind'),
(10182, 'Kaithal'),
(10183, 'Karnal'),
(10184, 'Kurukshetra'),
(10185, 'Mahendragarh'),
(10186, 'Mewat'),
(10187, 'Palwal'),
(10188, 'Panchkula'),
(10189, 'Panipat'),
(10190, 'Rewari'),
(10191, 'Rohtak'),
(10192, 'Sirsa'),
(10193, 'Sonipat'),
(10194, 'Yamunanagar')],
1014: [(10195, 'Bilaspur'),
(10196, 'Chamba'),
(10197, 'Hamirpur'),
(10198, 'Kangra'),
(10199, 'Kinnaur'),
(10200, 'Kullu'),
(10201, 'Lahaul & Spiti'),
(10202, 'Mandi'),
(10203, 'Shimla'),
(10204, 'Sirmaur (Sirmour)'),
(10205, 'Solan'),
(10206, 'Una')],
1015: [(10207, 'Anantnag'),
(10208, 'Bandipora'),
(10209, 'Baramulla'),
(10210, 'Budgam'),
(10211, 'Doda'),
(10212, 'Ganderbal'),
(10213, 'Jammu'),
(10214, 'Kargil'),
(10215, 'Kathua'),
(10216, 'Kishtwar'),
(10217, 'Kulgam'),
(10218, 'Kupwara'),
(10219, 'Leh'),
(10220, 'Poonch'),
(10221, 'Pulwama'),
(10222, 'Rajouri'),
(10223, 'Ramban'),
(10224, 'Reasi'),
(10225, 'Samba'),
(10226, 'Shopian'),
(10227, 'Srinagar'),
(10228, 'Udhampur')],
1016: [(10229, 'Bokaro'),
(10230, 'Chatra'),
(10231, 'Deoghar'),
(10232, 'Dhanbad'),
(10233, 'Dumka'),
(10234, 'East Singhbhum'),
(10235, 'Garhwa'),
(10236, 'Giridih'),
(10237, 'Godda'),
(10238, 'Gumla'),
(10239, 'Hazaribag'),
(10240, 'Jamtara'),
(10241, 'Khunti'),
(10242, 'Koderma'),
(10243, 'Latehar'),
(10244, 'Lohardaga'),
(10245, 'Pakur'),
(10246, 'Palamu'),
(10247, 'Ramgarh'),
(10248, 'Ranchi'),
(10249, 'Sahibganj'),
(10250, 'Seraikela-Kharsawan'),
(10251, 'Simdega'),
(10252, 'West Singhbhum')],
1017: [(10253, 'Bagalkot'),
(10254, 'Bangalore Rural'),
(10255, 'Bangalore Urban'),
(10256, 'Belgaum'),
(10257, 'Bellary'),
(10258, 'Bidar'),
(10259, 'Bijapur'),
(10260, 'Chamarajanagar'),
(10261, 'Chickmagalur'),
(10262, 'Chikballapur'),
(10263, 'Chitradurga'),
(10264, 'Dakshina Kannada'),
(10265, 'Davangere'),
(10266, 'Dharwad'),
(10267, 'Gadag'),
(10268, 'Gulbarga'),
(10269, 'Hassan'),
(10270, 'Haveri'),
(10271, 'Kodagu'),
(10272, 'Kolar'),
(10273, 'Koppal'),
(10274, 'Mandya'),
(10275, 'Mysore'),
(10276, 'Raichur'),
(10277, 'Ramnagara'),
(10278, 'Shimoga'),
(10279, 'Tumkur'),
(10280, 'Udupi'),
(10281, 'Uttara Kannada (Karwar)'),
(10282, 'Yadgir')],
1018: [(10283, 'Alappuzha'),
(10284, 'Ernakulam'),
(10285, 'Idukki'),
(10286, 'Kannur'),
(10287, 'Kasaragod'),
(10288, 'Kollam'),
(10289, 'Kottayam'),
(10290, 'Kozhikode'),
(10291, 'Malappuram'),
(10292, 'Palakkad'),
(10293, 'Pathanamthitta'),
(10294, 'Thiruvananthapuram'),
(10295, 'Thrissur'),
(10296, 'Wayanad')],
1019: [(10297, 'Lakshadweep')],
1020: [(10298, 'Alirajpur'),
(10299, 'Anuppur'),
(10300, 'Ashoknagar'),
(10301, 'Balaghat'),
(10302, 'Barwani'),
(10303, 'Betul'),
(10304, 'Bhind'),
(10305, 'Bhopal'),
(10306, 'Burhanpur'),
(10307, 'Chhatarpur'),
(10308, 'Chhindwara'),
(10309, 'Damoh'),
(10310, 'Datia'),
(10311, 'Dewas'),
(10312, 'Dhar'),
(10313, 'Dindori'),
(10314, 'Guna'),
(10315, 'Gwalior'),
(10316, 'Harda'),
(10317, 'Hoshangabad'),
(10318, 'Indore'),
(10319, 'Jabalpur'),
(10320, 'Jhabua'),
(10321, 'Katni'),
(10322, 'Khandwa'),
(10323, 'Khargone'),
(10324, 'Mandla'),
(10325, 'Mandsaur'),
(10326, 'Morena'),
(10327, 'Narsinghpur'),
(10328, 'Neemuch'),
(10329, 'Panna'),
(10330, 'Raisen'),
(10331, 'Rajgarh'),
(10332, 'Ratlam'),
(10333, 'Rewa'),
(10334, 'Sagar'),
(10335, 'Satna'),
(10336, 'Sehore'),
(10337, 'Seoni'),
(10338, 'Shahdol'),
(10339, 'Shajapur'),
(10340, 'Sheopur'),
(10341, 'Shivpuri'),
(10342, 'Sidhi'),
(10343, 'Singrauli'),
(10344, 'Tikamgarh'),
(10345, 'Ujjain'),
(10346, 'Umaria'),
(10347, 'Vidisha')],
1021: [(10348, 'Ahmednagar'),
(10349, 'Akola'),
(10350, 'Amravati'),
(10351, 'Aurangabad'),
(10352, 'Beed'),
(10353, 'Bhandara'),
(10354, 'Buldhana'),
(10355, 'Chandrapur'),
(10356, 'Dhule'),
(10357, 'Gadchiroli'),
(10358, 'Gondia'),
(10359, 'Hingoli'),
(10360, 'Jalgaon'),
(10361, 'Jalna'),
(10362, 'Kolhapur'),
(10363, 'Latur'),
(10364, 'Mumbai City'),
(10365, 'Mumbai Suburban'),
(10366, 'Nagpur'),
(10367, 'Nanded'),
(10368, 'Nandurbar'),
(10369, 'Nashik'),
(10370, 'Osmanabad'),
(10371, 'Parbhani'),
(10372, 'Pune'),
(10373, 'Raigad'),
(10374, 'Ratnagiri'),
(10375, 'Sangli'),
(10376, 'Satara'),
(10377, 'Sindhudurg'),
(10378, 'Solapur'),
(10379, 'Thane'),
(10380, 'Wardha'),
(10381, 'Washim'),
(10382, 'Yavatmal')],
1022: [(10383, 'Bishnupur'),
(10384, 'Chandel'),
(10385, 'Churachandpur'),
(10386, 'Imphal East'),
(10387, 'Imphal West'),
(10388, 'Senapati'),
(10389, 'Tamenglong'),
(10390, 'Thoubal'),
(10391, 'Ukhrul')],
1023: [(10392, 'East Garo Hills'),
(10393, 'East Jaintia Hills'),
(10394, 'East Khasi Hills'),
(10395, 'North Garo Hills'),
(10396, 'Ri Bhoi'),
(10397, 'South Garo Hills'),
(10398, 'South West Garo Hills'),
(10399, 'South West Khasi Hills'),
(10400, 'West Garo Hills'),
(10401, 'West Jaintia Hills'),
(10402, 'West Khasi Hills')],
1024: [(10403, 'Aizawl'),
(10404, 'Champhai'),
(10405, 'Kolasib'),
(10406, 'Lawngtlai'),
(10407, 'Lunglei'),
(10408, 'Mamit'),
(10409, 'Saiha'),
(10410, 'Serchhip')],
1025: [(10411, 'Dimapur'),
(10412, 'Kiphire'),
(10413, 'Kohima'),
(10414, 'Longleng'),
(10415, 'Mokokchung'),
(10416, 'Mon'),
(10417, 'Peren'),
(10418, 'Phek'),
(10419, 'Tuensang'),
(10420, 'Wokha'),
(10421, 'Zunheboto')],
1026: [(10422, 'Angul'),
(10423, 'Balangir'),
(10424, 'Balasore'),
(10425, 'Bargarh'),
(10426, 'Bhadrak'),
(10427, 'Boudh'),
(10428, 'Cuttack'),
(10429, 'Deogarh'),
(10430, 'Dhenkanal'),
(10431, 'Gajapati'),
(10432, 'Ganjam'),
(10433, 'Jagatsinghapur'),
(10434, 'Jajpur'),
(10435, 'Jharsuguda'),
(10436, 'Kalahandi'),
(10437, 'Kandhamal'),
(10438, 'Kendrapara'),
(10439, 'Kendujhar (Keonjhar)'),
(10440, 'Khordha'),
(10441, 'Koraput'),
(10442, 'Malkangiri'),
(10443, 'Mayurbhanj'),
(10444, 'Nabarangpur'),
(10445, 'Nayagarh'),
(10446, 'Nuapada'),
(10447, 'Puri'),
(10448, 'Rayagada'),
(10449, 'Sambalpur'),
(10450, 'Sonepur'),
(10451, 'Sundargarh')],
1027: [(10452, 'Karaikal'),
(10453, 'Mahe'),
(10454, 'Pondicherry'),
(10455, 'Yanam')],
1028: [(10456, 'Amritsar'),
(10457, 'Barnala'),
(10458, 'Bathinda'),
(10459, 'Faridkot'),
(10460, 'Fatehgarh Sahib'),
(10461, 'Fazilka'),
(10462, 'Ferozepur'),
(10463, 'Gurdaspur'),
(10464, 'Hoshiarpur'),
(10465, 'Jalandhar'),
(10466, 'Kapurthala'),
(10467, 'Ludhiana'),
(10468, 'Mansa'),
(10469, 'Moga'),
(10470, 'Muktsar'),
(10471, 'Nawanshahr'),
(10472, 'Pathankot'),
(10473, 'Patiala'),
(10474, 'Rupnagar'),
(10476, 'SAS Nagar (Mohali)'),
(10475, 'Sangrur'),
(10477, 'Tarn Taran')],
1029: [(10478, 'Ajmer'),
(10479, 'Alwar'),
(10480, 'Banswara'),
(10481, 'Baran'),
(10482, 'Barmer'),
(10483, 'Bharatpur'),
(10484, 'Bhilwara'),
(10485, 'Bikaner'),
(10486, 'Bundi'),
(10487, 'Chittorgarh'),
(10488, 'Churu'),
(10489, 'Dausa'),
(10490, 'Dholpur'),
(10491, 'Dungarpur'),
(10492, 'Hanumangarh'),
(10493, 'Jaipur'),
(10494, 'Jaisalmer'),
(10495, 'Jalore'),
(10496, 'Jhalawar'),
(10497, 'Jhunjhunu'),
(10498, 'Jodhpur'),
(10499, 'Karauli'),
(10500, 'Kota'),
(10501, 'Nagaur'),
(10502, 'Pali'),
(10503, 'Pratapgarh'),
(10504, 'Rajsamand'),
(10505, 'Sawai Madhopur'),
(10506, 'Sikar'),
(10507, 'Sirohi'),
(10508, 'Sri Ganganagar'),
(10509, 'Tonk'),
(10510, 'Udaipur')],
1030: [(10511, 'East Sikkim'),
(10512, 'North Sikkim'),
(10513, 'South Sikkim'),
(10514, 'West Sikkim')],
1031: [(10515, 'Ariyalur'),
(10516, 'Chennai'),
(10517, 'Coimbatore'),
(10518, 'Cuddalore'),
(10519, 'Dharmapuri'),
(10520, 'Dindigul'),
(10521, 'Erode'),
(10522, 'Kanchipuram'),
(10523, 'Kanyakumari'),
(10524, 'Karur'),
(10525, 'Krishnagiri'),
(10526, 'Madurai'),
(10527, 'Nagapattinam'),
(10528, 'Namakkal'),
(10529, 'Nilgiris'),
(10530, 'Perambalur'),
(10531, 'Pudukkottai'),
(10532, 'Ramanathapuram'),
(10533, 'Salem'),
(10534, 'Sivaganga'),
(10535, 'Thanjavur'),
(10536, 'Theni'),
(10537, 'Thoothukudi (Tuticorin)'),
(10538, 'Tiruchirappalli'),
(10539, 'Tirunelveli'),
(10540, 'Tiruppur'),
(10541, 'Tiruvallur'),
(10542, 'Tiruvannamalai'),
(10543, 'Tiruvarur'),
(10544, 'Vellore'),
(10545, 'Viluppuram'),
(10546, 'Virudhunagar')],
1032: [(10547, 'Adilabad'),
(10548, 'Hyderabad'),
(10549, 'Karimnagar'),
(10550, 'Khammam'),
(10551, 'Mahabubnagar'),
(10552, 'Medak'),
(10553, 'Nalgonda'),
(10554, 'Nizamabad'),
(10555, 'Rangareddy'),
(10556, 'Warangal')],
1033: [(10557, 'Dhalai'),
(10558, 'Gomati'),
(10559, 'Khowai'),
(10560, 'North Tripura'),
(10561, 'Sepahijala'),
(10562, 'South Tripura'),
(10563, 'Unakoti'),
(10564, 'West Tripura')],
1034: [(10565, 'Agra'),
(10566, 'Aligarh'),
(10567, 'Allahabad'),
(10568, 'Ambedkar Nagar'),
(10569, 'Auraiya'),
(10570, 'Azamgarh'),
(10571, 'Baghpat'),
(10572, 'Bahraich'),
(10573, 'Ballia'),
(10574, 'Balrampur'),
(10575, 'Banda'),
(10576, 'Barabanki'),
(10577, 'Bareilly'),
(10578, 'Basti'),
(10579, 'Bhim Nagar'),
(10580, 'Bijnor'),
(10581, 'Budaun'),
(10582, 'Bulandshahr'),
(10583, 'Chandauli'),
(10584, 'Chatrapati Sahuji Mahraj Nagar'),
(10585, 'Chitrakoot'),
(10586, 'Deoria'),
(10587, 'Etah'),
(10588, 'Etawah'),
(10589, 'Faizabad'),
(10590, 'Farrukhabad'),
(10591, 'Fatehpur'),
(10592, 'Firozabad'),
(10593, 'Gautam Buddha Nagar'),
(10671, 'Noida'), # Alias entry: listed because many users don't know it as Gautam Buddha Nagar. NOTE(review): id 10671 is also assigned to 'Uttar Dinajpur' under West Bengal (1036) — duplicate id, confirm and renumber.
(10594, 'Ghaziabad'),
(10595, 'Ghazipur'),
(10596, 'Gonda'),
(10597, 'Gorakhpur'),
(10598, 'Hamirpur'),
(10599, 'Hardoi'),
(10600, 'Hathras'),
(10601, 'Jalaun'),
(10602, 'Jaunpur'),
(10603, 'Jhansi'),
(10604, 'Jyotiba Phule Nagar (J.P. Nagar)'),
(10605, 'Kannauj'),
(10606, 'Kanpur Dehat'),
(10607, 'Kanpur Nagar'),
(10608, 'Kanshiram Nagar (Kasganj)'),
(10609, 'Kaushambi'),
(10610, 'Kushinagar (Padrauna)'),
(10611, 'Lakhimpur - Kheri'),
(10612, 'Lalitpur'),
(10613, 'Lucknow'),
(10614, 'Maharajganj'),
(10615, 'Mahoba'),
(10616, 'Mainpuri'),
(10617, 'Mathura'),
(10618, 'Mau'),
(10619, 'Meerut'),
(10620, 'Mirzapur'),
(10621, 'Moradabad'),
(10622, 'Muzaffarnagar'),
(10623, 'Panchsheel Nagar'),
(10624, 'Pilibhit'),
(10625, 'Prabuddh Nagar'),
(10626, 'Pratapgarh'),
(10627, 'RaeBareli'),
(10628, 'Rampur'),
(10629, 'Saharanpur'),
(10630, 'Sant Kabir Nagar'),
(10631, 'Sant Ravidas Nagar'),
(10632, 'Shahjahanpur'),
(10633, 'Shravasti'),
(10634, 'Siddharth Nagar'),
(10635, 'Sitapur'),
(10636, 'Sonbhadra'),
(10637, 'Sultanpur'),
(10638, 'Unnao'),
(10639, 'Varanasi')],
1035: [(10640, 'Almora'),
(10641, 'Bageshwar'),
(10642, 'Chamoli'),
(10643, 'Champawat'),
(10644, 'Dehradun'),
(10645, 'Haridwar'),
(10646, 'Nainital'),
(10647, 'Pauri Garhwal'),
(10648, 'Pithoragarh'),
(10649, 'Rudraprayag'),
(10650, 'Tehri Garhwal'),
(10651, 'Udham Singh Nagar'),
(10652, 'Uttarkashi')],
1036: [(10653, 'Bankura'),
(10654, 'Birbhum'),
(10655, 'Burdwan (Bardhaman)'),
(10656, 'Cooch Behar'),
(10657, 'Dakshin Dinajpur (South Dinajpur)'),
(10658, 'Darjeeling'),
(10659, 'Hooghly'),
(10660, 'Howrah'),
(10661, 'Jalpaiguri'),
(10662, 'Kolkata'),
(10663, 'Malda'),
(10664, 'Murshidabad'),
(10665, 'Nadia'),
(10666, 'North 24 Parganas'),
(10667, 'Paschim Medinipur (West Medinipur)'),
(10668, 'Purba Medinipur (East Medinipur)'),
(10669, 'Purulia'),
(10670, 'South 24 Parganas'),
(10671, 'Uttar Dinajpur (North Dinajpur)')]
}
| true
|
2b2ce7a072f3f65793f96129ffb457aedf616573
|
Python
|
wjidea/pythonExercise
|
/10_plu_list_in_dict.py
|
UTF-8
| 797
| 3.59375
| 4
|
[] |
no_license
|
#! /usr/bin/python
# 10_plu_list_in_dict.py
# Parse the fruit and veggies file using the split function, and store them
# in a dictionary. Key is the PLU code; the name and price go in a list.
# Jie Wang
# September 1, 2016

# Read the file and parse each line into the dict.
filePath = '../fruits_veggies.txt'
marketDict = {}
# IMPROVED: use 'with' so the handle is always closed, and iterate the
# file directly instead of materializing readlines().
with open(filePath, 'r') as inFile:
    for line in inFile:
        lineT = line.rstrip().split()
        # Columns used: [0] name, [2] price, [3] PLU — TODO confirm layout
        # against the actual data file.
        plu, fruit, price = lineT[3], lineT[0], lineT[2]
        marketDict[plu] = [fruit, price]
        # print('{0}\t{1}'.format(plu, fruit))

# Write PLU -> price pairs (tab-separated) to a new text file.
newFilePath = '../plu_codes_and_fruit_veggie_prices.txt'
with open(newFilePath, 'w') as outFile:
    for key in marketDict.keys():
        outFile.write('{0}\t{1}\n'.format(key, marketDict[key][1]))
| true
|
bb0b5e9775ecdfe6eae3354899b31dc5d23b5a13
|
Python
|
moming2k/TradingProjects
|
/HKHorseDB/library/horseDataCache.py
|
UTF-8
| 1,580
| 2.5625
| 3
|
[] |
no_license
|
import os
import sys
import urllib
sys.path.append(os.path.join(os.getcwd(), '..'))
# sys.setdefaultencoding('utf-8')
from bs4 import BeautifulSoup
from selenium import webdriver
from constant import path_info
class HorseDataCache():
    """File-system cache of raw HTML pages, keyed by URL."""

    def __init__(self):
        self.browser = None
        self.encoding = 'utf-8'
        current_path = os.getcwd()
        project_path = os.path.dirname(current_path)
        # Cached pages live under <parent-of-cwd>/data/cache.
        self.html_cache = project_path + "/data/cache"

    def get_html_cache_path(self):
        """Return the cache directory path."""
        return self.html_cache

    def get_cache_path(self, url):
        """Map *url* to its cache file path.

        The URL is percent-encoded, then '/' is replaced with '_' to give
        a flat, file-safe name inside the cache directory.
        """
        url_path = urllib.parse.quote(url).replace('/', '_')
        return "{}/{}".format(self.html_cache, url_path)

    def is_cache_html(self, url):
        """Return True when a cached copy of *url* exists on disk."""
        # IMPROVED: return the boolean directly instead of if/else True/False.
        return os.path.isfile(self.get_cache_path(url))

    def get_cache_html(self, url, debug = False):
        """Return the cached HTML for *url*, or None when it is not cached."""
        filepath = self.get_cache_path(url)
        if not os.path.isfile(filepath):
            if (debug):
                print("url = {} not exist in cache".format(url))
            return None
        if (debug):
            print("url = {} exist in cache".format(url))
        with open(filepath, 'r', encoding=self.encoding) as io_file:
            return io_file.read()

    def save_cache_html(self, url, html):
        """Write *html* to the cache file for *url*. Always returns True."""
        filepath = self.get_cache_path(url)
        with open(filepath, 'w', encoding=self.encoding) as out:
            out.write(html)
        return True
| true
|
373f008723d6a14e48c298dc2f7afc8972aeee43
|
Python
|
trungne/dictionary
|
/dictionary/test.py
|
UTF-8
| 779
| 2.84375
| 3
|
[] |
no_license
|
import requests

# Fetch the dictionary entry for "set" and print each meaning's fields.
r = requests.get('https://api.dictionaryapi.dev/api/v2/entries/en_US/set')
my_obj = r.json()
for i in my_obj[0]['meanings']:
    print(f"part of speech: {i['partOfSpeech']}")
    for definition in i['definitions']:
        # BUGFIX: the original loop body referenced undefined names
        # `key`, `value` and `synonyms`, raising NameError on the first
        # iteration. Walk each definition dict's items explicitly.
        for key, value in definition.items():
            if key == "definition":
                print(value)
            elif key == "example":
                print(value)
            elif key == "synonyms":
                for synonym in value:
                    print(synonym)
            else:
                pass

# Sample of one entry in the API response (reference only):
# {'partOfSpeech': 'noun',
#  'definitions': [
#      {'definition': 'An utterance of "hello"; a greeting.',
#       'synonyms': ['greeting', 'welcome', 'salutation', ...],
#       'example': 'she was getting polite nods and hellos from people'}]}
| true
|
c2223503feec171ec0c9db7281ca9d9dee576d66
|
Python
|
jas10220831/SWEA-Algorithm
|
/0819/4871_그래프경로/s1.py
|
UTF-8
| 371
| 2.765625
| 3
|
[] |
no_license
|
import sys
sys.stdin = open('sample_input.txt')  # feed the sample file through stdin

# Build the adjacency matrix of a directed graph (SWEA #4871).
T = int(input())  # number of test cases -- NOTE(review): only one case is read below
dot, line = map(int, input().split())  # vertex count, edge count
road = [[0] * (dot+1) for _ in range(dot+1)]  # road[a][b] = count of a->b edges
for _ in range(line):
    dot1, dot2 = map(int, input().split())
    road[dot1][dot2] += 1
start, goal = map(int, input().split())  # source and target vertices

def find_road(road, dot, start, goal):
    # Incomplete in this source: only the visited-marker array is set up;
    # the actual path search is missing.
    dot_check = [0] * dot
|
b32994b1286f36919c881afa1a3d450c09b915e1
|
Python
|
zongqi-wang/Beer-Advocate-Scraper
|
/beer_scraper/pipelines.py
|
UTF-8
| 1,689
| 2.609375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.exporters import CsvItemExporter
import csv
class BeerScraperPipeline(object):
    # Default Scrapy pipeline stub: forwards every item unchanged.
    def process_item(self, item, spider):
        return item
def item_type(item):
    """Derive a short type tag from an item's class name.

    E.g. an instance of ``CommentItem`` maps to ``"comment"``.
    """
    class_name = type(item).__name__
    return class_name.replace('Item', '').lower()
class DuplicatesPipeline(object):
    """Drop beer-info items whose beer_number was already seen.

    All other item types pass through untouched.
    """

    def __init__(self):
        self.ids_seen = set()  # beer_numbers encountered so far

    def process_item(self, item, spider):
        # Only beer-info items carry a unique beer_number to deduplicate on;
        # the original's duplicated `return item` branches are collapsed.
        if item_type(item) == 'beerinfo':
            if item['beer_number'] in self.ids_seen:
                raise DropItem("Duplicate item found: %s" % item)
            self.ids_seen.add(item['beer_number'])
        return item
class MultiCSVItemPipeline(object):
    """Route each item type to its own CSV file named '<type>.csv'."""

    SaveTypes = ['comment', 'beerinfo', 'breweryinfo']

    def open_spider(self, spider):
        self.type_to_exporter = {}  # item-type name -> CsvItemExporter
        self.type_to_file = {}      # item-type name -> open file handle

    def close_spider(self, spider):
        for exporter in self.type_to_exporter.values():
            exporter.finish_exporting()
        # BUGFIX: close the underlying files so buffered rows are flushed
        # to disk; the original leaked every handle it opened.
        for output_file in self.type_to_file.values():
            output_file.close()

    def _exporter_for_item(self, item):
        """Return (creating on first use) the exporter for this item's type."""
        name = item_type(item)
        if name not in self.type_to_exporter:
            f = open('{}.csv'.format(name), 'wb')
            exporter = CsvItemExporter(f)
            exporter.start_exporting()
            self.type_to_exporter[name] = exporter
            self.type_to_file[name] = f
        return self.type_to_exporter[name]

    def process_item(self, item, spider):
        exporter = self._exporter_for_item(item)
        exporter.export_item(item)
        return item
| true
|
95d81c3c2e5b5dded18c3aae6c0e6a70ccb9eee6
|
Python
|
zhang2639/docker_dedup
|
/storage/io.py
|
UTF-8
| 1,089
| 2.828125
| 3
|
[] |
no_license
|
def read_chunks_from_file(path, length):
    """Yield chunks of *path*, one per (_, size, _) triple in *length*.

    Only the middle element of each triple (the chunk size in bytes) is
    used; iteration stops early once the file is exhausted.
    """
    with open(path, 'rb', buffering=1024 * 64) as handle:
        for _first, size, _last in length:
            chunk = handle.read(size)
            if not chunk:
                return
            yield chunk
def write_chunks_to_file(path, block_gen):
    """Write every block produced by *block_gen* to *path*, in order."""
    with open(path, 'wb') as sink:
        for piece in block_gen:
            sink.write(piece)
# def read_file_part(path, offset, nb_bytes):
# with open(path, 'rb') as fin:
# fin.seek(offset)
# data = fin.read(nb_bytes)
# return data
# def write_file_part(path, offset, data):
# with open(path, 'r+b') as fout:
# fout.seek(offset)
# fout.write(data)
# fout.flush()
# def create_file(path):
# open(path, 'w').close()
def decompress_file(infile, outfile, compressor):
    # Stream-decompress *infile* into *outfile* via the given compressor.
    # NOTE(review): read_chunks_from_file iterates its `length` argument as
    # (i, j, k) triples, but an int block size is passed here -- consuming
    # the generator would raise TypeError. Confirm the intended contract.
    block_size = 2**24
    block_gen = read_chunks_from_file(infile, block_size)
    comp_block_gen = compressor.streaming_decompression(block_gen)
    write_chunks_to_file(outfile, comp_block_gen)
| true
|
1d8a0f171f640df4dbcdf226ffddd0f9474195bc
|
Python
|
pratikshirsathp/YTseries-dsalgo
|
/binary_search.py
|
UTF-8
| 498
| 4
| 4
|
[] |
no_license
|
#should have sorted list
def binary_search(list, target):
    """Return the index of *target* in the sorted *list*, or None if absent."""
    lo, hi = 0, len(list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = list[mid]
        if value == target:
            return mid
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return None
def verify(index):
    """Print the search outcome: the index if found, otherwise a notice."""
    if index is None:
        print("not found")
    else:
        print("index is ", index)
# Demo: search for 5 in a sorted list and report the result.
lis = [1,2,3,4,5,6,7,8,9,10]
result = binary_search(lis, 5)
verify(result)
| true
|
ff646d101df1be526fb9bf0d65a59155618d7037
|
Python
|
cenbow/UESTC-FinalProject
|
/src/utils/test_cached.py
|
UTF-8
| 600
| 2.828125
| 3
|
[] |
no_license
|
from unittest import TestCase
import os
import pickle as pkl
from cached import cached
import shutil
class TestCached(TestCase):
    """Exercise the @cached decorator's behaviour and its on-disk format."""

    def setUp(self):
        # Start each test with an empty ./cache directory.
        # BUGFIX: rmtree raised FileNotFoundError when ./cache did not exist
        # yet (e.g. on the very first run); ignore that case.
        shutil.rmtree('./cache', ignore_errors=True)
        os.mkdir('./cache')

    def test_cached(self):
        @cached('test')
        def build_tuple(n):
            return tuple(range(n))

        target = (0, 1, 2, 3, 4)
        # First call computes the value...
        self.assertTupleEqual(target, build_tuple(5))
        # ...and persists it as ./cache/test.pkl.
        self.assertTrue(os.path.exists('./cache/test.pkl'))
        with open('./cache/test.pkl', 'rb') as file:
            tmp = pkl.load(file)
        self.assertEqual(target, tmp)
| true
|
f8bbc827d96cbf36560cb041c86e6ff83929e935
|
Python
|
zhouwangyiteng/python100
|
/t14.py
|
UTF-8
| 429
| 3.53125
| 4
|
[] |
no_license
|
# _*_ coding: UTF-8 _*_
import math
def isNotPrime(num):
    """Return True if *num* has a divisor in [2, sqrt(num)], i.e. is composite."""
    limit = int(math.sqrt(num)) + 1
    for divisor in range(2, limit):
        if num % divisor == 0:
            return True
    return False
# Read an integer and print its prime factorisation, e.g. "90 = 2 * 3 * 3 * 5".
# Python 2 only (raw_input, print statements, integer '/=').
# NOTE(review): input 1 leaves `result` empty, so `result[0]` raises IndexError.
n = int(raw_input('Input n:'))
print n, '=',
result = []
t = 2
while(n!=1):
    # Divide out every occurrence of the current prime factor t.
    while(n%t==0):
        n /= t
        result.append(t)
    # Advance t to the next prime candidate.
    t += 1
    while(isNotPrime(t)):
        t += 1
print result[0],
for i in result[1:]:
    print '*', i,
| true
|
0ae6f431dbd05ae13919b12c669990c9e4b92a66
|
Python
|
hchandaria/UCB_MIDS_W261
|
/hw3/combiner.py
|
UTF-8
| 797
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/python
# HW3.2c: emit a counter every time the combiner is called. The combiner does
# intermediate aggregation and mirrors the reducer's logic. (Python 2 script.)
import sys
from csv import reader
# Hadoop-streaming counter: incremented once per combiner invocation.
sys.stderr.write("reporter:counter:HW_32c,num_combiners,1\n")
last_key = None    # word currently being aggregated
word = None
total_count = 0    # running count for last_key
# Input: CSV "(word,count)" lines on stdin, grouped by word.
for token in reader(sys.stdin):
    word = token[0]
    count = int(token[1])
    # Same word as the previous line: keep accumulating.
    if(last_key == word):
        total_count += int(count)
    else:
        # New word: flush the finished group, then start a fresh tally.
        if (last_key):
            print '"%s",%s' %(last_key,total_count)
        total_count = int(count)
        last_key = word
# Flush the final group.
# NOTE(review): with empty input this still prints '"None",0' because
# last_key == word == None -- confirm upstream always supplies data.
if last_key == word:
    print '"%s",%s' %(last_key,total_count)
| true
|
310279601a4afd2acf51c784b7552e2d34305fb5
|
Python
|
PsychicWaffle/4156project
|
/code/tests/test_validity_checker.py
|
UTF-8
| 2,046
| 2.71875
| 3
|
[] |
no_license
|
import unittest
from app import validity_checker


class ValidityCheckerClass(unittest.TestCase):
    """Tests for app.validity_checker's date-range, order and credential checks."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_valid_history_date_range_1(self):
        # A negative start date must be rejected.
        start_date = -1
        end_date = 100
        valid_date_range = validity_checker.valid_history_date_range(start_date, end_date)
        self.assertTrue(valid_date_range == False)

    def test_valid_history_date_range_2(self):
        # A well-ordered positive range is accepted.
        start_date = 100
        end_date = 200
        valid_date_range = validity_checker.valid_history_date_range(start_date, end_date)
        self.assertTrue(valid_date_range == True)

    def test_valid_history_date_range_3(self):
        # A start date after the end date must be rejected.
        start_date = 100
        end_date = 50
        valid_date_range = validity_checker.valid_history_date_range(start_date, end_date)
        self.assertTrue(valid_date_range == False)

    def test_order_size_1(self):
        # One above the maximum order size is rejected.
        big_order_size = validity_checker.MAX_ORDER_SIZE
        big_order_size = big_order_size + 1
        valid_order = validity_checker.valid_order_parameters(big_order_size)
        self.assertTrue(valid_order == False)

    def test_order_size_2(self):
        # One below the maximum order size is accepted.
        order_size = validity_checker.MAX_ORDER_SIZE - 1
        valid_order = validity_checker.valid_order_parameters(order_size)
        self.assertTrue(valid_order == True)

    def test_order_username_1(self):
        # A one-character username is too short.
        username = "A"
        valid_order = validity_checker.valid_username(username)
        self.assertTrue(valid_order == False)

    def test_order_username_2(self):
        username = "Andrew"
        valid_order = validity_checker.valid_username(username)
        self.assertTrue(valid_order == True)

    def test_order_password_1(self):
        # NOTE(review): the password tests call valid_username -- confirm
        # whether a dedicated valid_password checker was intended.
        password = "a"
        valid_order = validity_checker.valid_username(password)
        self.assertTrue(valid_order == False)

    def test_order_password_2(self):
        # BUGFIX: this method was previously also named
        # test_order_username_2, silently shadowing the earlier test of the
        # same name so one of the two never ran.
        password = "dklfjdkfjl"
        valid_order = validity_checker.valid_username(password)
        self.assertTrue(valid_order == True)
| true
|
e160da307538f15ac09bd0bdd955198ad383d9c4
|
Python
|
danielfrg/dbplot
|
/dbplot/calculations.py
|
UTF-8
| 833
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import ibis
import numpy as np
import pandas as pd


def hist(table, column, nbins=10, binwidth=None):
    """Compute histogram weights and bucket edges for an ibis table column.

    Parameters
    ----------
    table : ibis table expression
    column : str
        Name of the numeric column to bin.
    nbins : int or None
        Number of bins; when None, derived from ``binwidth``.
    binwidth : float or None
        Fixed bin width; when None, derived from ``nbins``.

    Returns
    -------
    (weights, buckets)
        Per-bucket row counts (ndarray) and the list of bucket edges.

    Raises
    ------
    ValueError
        If both ``nbins`` and ``binwidth`` are None.
    """
    if nbins is None and binwidth is None:
        raise ValueError("Must indicate nbins or binwidth")
    min_, max_ = table[column].min().execute(), table[column].max().execute()
    min_, max_ = float(min_), float(max_)  # From numpy.float to python.float
    if binwidth is None:
        binwidth = (max_ - min_) / (nbins)
    if nbins is None:
        # BUGFIX: this combination previously raised a mislabelled
        # "mutually exclusive" ValueError; derive the bin count instead so a
        # caller may specify binwidth alone.
        nbins = int(np.ceil((max_ - min_) / binwidth))
    buckets = [min_ + i * binwidth for i in range(nbins + 1)]
    bucketed = table[table[column] != ibis.null()][column].bucket(buckets).name("bucket")
    bucket_counts = bucketed.value_counts().execute()
    weights = bucket_counts["count"].values
    return weights, buckets
| true
|
9af3d1655e72b8c45da52483e95302c2e9b0daae
|
Python
|
HBinhCT/Q-project
|
/hackerearth/Math/Number Theory/Basic Number Theory-1/Candy Distribution 3/solution.py
|
UTF-8
| 433
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
from sys import stdin

# For each query n, answer (2^n * 2^n - 2*3^n + 2^n) mod 1e9+7, with n == 1
# as a special case that yields 0. Powers are precomputed mod p.
mod = 1000000007

# Read the query count, then one n per line.
toffees = [int(stdin.readline()) for _ in range(int(stdin.readline()))]

# Power tables of 2 and 3, sized for the largest query.
size = max(toffees) + 2
comb_x2 = [1, 2]
comb_x3 = [1, 3]
for exponent in range(2, size):
    comb_x2.append(comb_x2[exponent - 1] * 2 % mod)
    comb_x3.append(comb_x3[exponent - 1] * 3 % mod)

for n in toffees:
    if n == 1:
        print(0)
    else:
        print((comb_x2[n] * comb_x2[n] % mod - 2 * comb_x3[n] % mod + comb_x2[n]) % mod)
| true
|
ee51a7f65e741bb86a01299489ddbd847606aead
|
Python
|
karolinanikolova/SoftUni-Software-Engineering
|
/2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/07-Dictionaries/01_Lab/01-Bakery.py
|
UTF-8
| 653
| 4.34375
| 4
|
[
"MIT"
] |
permissive
|
# 1. Bakery
# Read one line of alternating "<product> <quantity>" tokens (product is the
# key, the following token its quantity), build a {product: quantity}
# dictionary, and print it.
data = input().split()
products = {}
index = 0
while index < len(data):
    # Tokens come in pairs: even index = product, odd index = quantity.
    # (An odd token count raises IndexError, same as the original.)
    products[data[index]] = int(data[index + 1])
    index += 2
print(products)
| true
|
899d606ee645f82405a9967bc98c2d5320a62312
|
Python
|
HUANGZHIHAO1994/climate_change
|
/wosspider2.2/wosspider/seleniumurl.py
|
UTF-8
| 3,090
| 2.515625
| 3
|
[] |
no_license
|
from selenium import webdriver
import time
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from scrapy.http.response.html import HtmlResponse
from selenium.webdriver.chrome.options import Options
def Selenium_article_url(art):
    """Search Web of Science for article title *art*; return (url, response).

    Drives a Chrome browser against apps.webofknowledge.com, switches the
    search field to "Title", submits the query, and returns the URL of the
    first full-record result plus a Scrapy HtmlResponse built from the final
    page source. On failure, url stays '' and the response wraps whatever
    page was last loaded.
    """
    # def __init__(self):
    url = ''
    driver = webdriver.Chrome()
    # chrome_options = Options()
    # chrome_options.add_argument('--headless')
    # chrome_options.add_argument('--disable-gpu')
    # driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.get('http://apps.webofknowledge.com')
    # Wait until any element is present, i.e. the page rendered.
    WebDriverWait(driver, 100, 0.5).until(
        EC.presence_of_element_located((By.XPATH, "//div")))
    # time.sleep(2)
    try:
        # Type the article title into the search box.
        input1 = driver.find_element_by_xpath("//div[@class='search-criteria-input-wr']/input")
        input1.send_keys(art)
    except:
        pass
    time.sleep(0.3)
    try:
        # [position()<3]
        # Open the search-field dropdown and switch it to "Title".
        selector = driver.find_element_by_xpath("//span[@class='selection']/span[@class='select2-selection select2-selection--single']//span[@id='select2-select1-container']")
        selector.click()
        time.sleep(0.3)
        selector2 = driver.find_element_by_xpath("//input[@class='select2-search__field']")
        # selector2.send_keys('Title')  # use this line if WOS opens in English
        selector2.send_keys('标题')  # use this line if WOS opens in Chinese
        time.sleep(0.3)
        selector2.send_keys(Keys.ENTER)
    except:
        pass
    time.sleep(0.3)
    try:
        # Click the search button.
        button = driver.find_element_by_xpath("//button[@class='large-button primary-button margin-left-10']")
        button.click()
    except:
        pass
    # time.sleep(1)
    WebDriverWait(driver, 30, 0.5).until(
        EC.presence_of_element_located((By.XPATH, "//div")))
    try:
        print('=' * 30)
        print(driver.current_url)
        # # Approach 1 (click through to the record page):
        # button2 = driver.find_element_by_xpath("//div[@class='search-results-content']//a/value")
        # button2.click()
        # time.sleep(10)
        # url = driver.current_url
        # print('=' * 30)
        # print(url)
        # Approach 2: take the href of the first full-record link directly.
        urls_pre = driver.find_element_by_xpath("//div[@class='search-results-content']//a[@class='smallV110 snowplow-full-record']")
        url = urls_pre.get_attribute("href")
        print("方法二")
        print('=' * 30)
        print(url)
        # for a in impact:
        # print(a)
        # print('*'*30)
        # print(type(a))
        # print(impact)
        # print(type(impact))
    except:
        print("本文WOS上没有:", art)
    # url = driver.current_url
    # print('=' * 30)
    # print(url)
    response = HtmlResponse(url=url, body=driver.page_source, encoding='utf-8')
    driver.close()
    # source = driver.page_source
    return url, response
if __name__ == '__main__':
    # Smoke test: look up a known article title.
    art = 'Geomorphology of the Upper General River Basin, Costa Rica'
    Selenium_article_url(art)
| true
|
17e88847dc47a9879f337be4f05c62cf54604447
|
Python
|
GangLi-0814/PyStaData
|
/Python/Python_NLP_Basic/社调行业和职业自动编码/社会经济调查行业和职业自动编码模型代码/基于卷积神经网络的社会经济调查行业和职业自动编码模型/splitClass.py
|
UTF-8
| 3,137
| 2.96875
| 3
|
[] |
no_license
|
# coding=utf-8
# Split 6-digit occupation/industry codes into digit slices and write one
# output file per slice and data split. Python 2 script (uses `unicode`).
import pandas as pd
import numpy as np
# Occupation (occ) train / validation / test files.
occfiles = [r'data/occ/occ_train.txt',r'data/occ/occ_val.txt',r'data/occ/occ_test.txt']
# Split each code into its high-2 / middle-2 / low-2 digits.
count = 0
for occfile in occfiles:
    count = count+1
    df = pd.DataFrame(pd.read_table(occfile,sep='\t',encoding="utf_8_sig",names=['1','2']))
    df1 = df.copy()
    df2 = df.copy()
    df3 = df.copy()
    df = df["1"].values
    a0 = []
    a1 = []
    a2 = []
    for r in range(0,df.shape[0]):
        # A bare '0' label stands for the all-zero six-digit code.
        if u'0' == df[r]:
            x = u'000000'
        else:
            x = unicode(str(df[r]))
        a0.append(x[len(x)-2:len(x)]) # low 2 digits
        a1.append(x[len(x)-4:len(x)-2]) # middle 2 digits
        a2.append(x[:len(x)-4]) # high digits
    df1["1"] = a0
    df2["1"] = a1
    df3["1"] = a2
    # Write the three digit slices for the current split (1=train, 2=val, 3=test).
    if 1 == count :
        df1.to_csv('data/occ/occ_train_12.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df2.to_csv('data/occ/occ_train_34.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df3.to_csv('data/occ/occ_train_56.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
    elif 2 == count :
        df1.to_csv('data/occ/occ_val_12.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df2.to_csv('data/occ/occ_val_34.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df3.to_csv('data/occ/occ_val_56.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
    else :
        df1.to_csv('data/occ/occ_test_12.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df2.to_csv('data/occ/occ_test_34.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df3.to_csv('data/occ/occ_test_56.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
# Industry (ind) train / validation / test files.
indfiles = [r'data/ind/ind_train.txt',r'data/ind/ind_val.txt',r'data/ind/ind_test.txt']
# Split into middle-2 and high-2 digits only (no low-2 slice for industries).
count = 0
for indfile in indfiles:
    count = count+1
    dfs = pd.DataFrame(pd.read_table(indfile,sep='\t',encoding="utf_8_sig",names=['1','2']))
    # NOTE(review): the next two assignments are immediately overwritten by
    # the .copy() calls below and appear redundant.
    df4 = pd.DataFrame(dfs)
    df5 = pd.DataFrame(dfs)
    df4 = dfs.copy()
    df5 = dfs.copy()
    dfs = dfs["1"].values
    b1 = []
    b2 = []
    for r1 in range(0,dfs.shape[0]):
        if u'0' == dfs[r1]:
            x = u'000000'
        else:
            x = unicode(dfs[r1])
        b1.append(x[len(x)-4:len(x)-2]) # middle 2 digits
        b2.append(x[:len(x)-4]) # high digits
    df4["1"] = b1
    df5["1"] = b2
    if 1 == count :
        df4.to_csv('data/ind/ind_train_34.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df5.to_csv('data/ind/ind_train_56.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
    elif 2 == count :
        df4.to_csv('data/ind/ind_val_34.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df5.to_csv('data/ind/ind_val_56.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
    else :
        df4.to_csv('data/ind/ind_test_34.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
        df5.to_csv('data/ind/ind_test_56.txt',index=False,sep='\t',encoding="utf_8_sig",header=0)
| true
|
9d322987ff50195fc7d33094b6766b4995c462b2
|
Python
|
kidonrage/FESTU_Web
|
/PR_12/cgi-bin/my_database/add_entry.py
|
UTF-8
| 2,382
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# CGI page: renders the "add entry" questionnaire (labels in Russian) that
# posts to add_entry_handler.py as multipart/form-data.
# HTTP header, then a blank line to end the headers.
print("Content-type: text/html")
print()
# Document head.
print("<!DOCTYPE html>")
print("<html lang='en'>")
print("<head>")
print("<meta charset='UTF-8'>")
print("<title>Добавить запись</title>")
print("</head>")
print("<body>")
print("<form action='add_entry_handler.py' method='post' enctype='multipart/form-data'>")
print("<h1>Заполните анкету:</h1>")
# Full name (required).
print("<p>Введите ваше ФИО:")
print("<input type='text' name='NAME' required>")
print("</p>")
# Password (required).
print("<p>Введите пароль:")
print("<input type='password' name='PASS' required>")
print("</p>")
# Occupation dropdown.
print("<p>Ваш род занятий:")
print("<select name='OCCUPATION'>")
print("<option value='Инф. Технологии' selected> Инф. технологии</option>")
print("<option value='Строительство' > Строительство</option>")
print("<option value='Бизнес'> Бизнес</option>")
print("</select>")
print("</p>")
# Gender radio buttons.
print("<p>Пол:")
print("<input type='radio' name='GENDER' value='Мужской' checked>Мужской</input>")
print("<input type='radio' name='GENDER' value='Женский'>Женский</input>")
print("</p>")
# Free-text education info.
print("<p>Сведения об образовании:</p>")
print("<textarea name='EDUCATION_INFO' placeholder='Ваши сведения здесь' cols=45 rows=3 maxlength=50 ></textarea>")
print("<p></p>")
# Work-preference checkboxes (one or more may be selected).
print("<a>Ваши предпочтения:</a> <input name='WORK' value='Всё равно' style='margin-left:100px;' type='checkbox' checked>Всё равно</input> <br>")
print("<a>(один или несколько вариантов)</a> <input name='WORK' value='Работа с клиентами' style='margin-left:23px;' type='checkbox'>Работа с клиентами</input> <br>")
print("<input name='WORK' value='Работа с документами' style='margin-left:245px;' type='checkbox'>Работа с документами</input> <br>")
print("<input name='WORK' value = 'Работа в одиночку' style='margin-left:245px;' type='checkbox'>Работа в одиночку</input> <br> <br>")
# Reset / submit buttons.
print("<button style='margin-left:200px;' type='reset'>Очистить</button> <button type='submit'>Подтвердить</button> <br> <br>")
print("</form>")
print("</body>")
print("</html>")
| true
|
edc37208a2ccd3f4833a04950446166c5c1727b6
|
Python
|
yuri10/TCC_TCE
|
/tcc_lsi_grupos.py
|
UTF-8
| 13,324
| 3.109375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 5 09:59:35 2019
@author: yoliveira\
"""
import pandas as pd #dataframe manipulations
import nltk #tokenizer
import re #re.sub() - Data Cleaner
import unidecode #remover acentos
import gc #garbage collector (para remover variaveis da memória que não estão sendo mais utilizadas)
# Data-cleaning pipeline (original Portuguese notes kept below): lower-case,
# strip accents, drop non-letters, remove stop words, tokenize, stem,
# de-duplicate tokens, and build the token lists that feed the LSI model.
'''
Tratamento dos dados:
*Converte todas as palavras para letra minuscula
*Remove acentos
*Remove caracteres especiais e numeros
*Remove StopWords
*Tokenizing
*Stemming
*Remove palavras de tamanho < 3
*Lista que alimenta LSI
'''
# Read the group-code table (Excel) and the 2019 tender descriptions (CSV).
df = pd.read_excel("C:/Users/Yuri Oliveira/Desktop/TCC_TCE/tabela_codigo_do_objeto.xls", sep = ';')
df_licitacoes2019 = pd.read_csv("C:/Users/Yuri Oliveira/Desktop/TCC_TCE/Licitacoes_2019.csv", encoding = "ISO-8859-1", sep = ';', usecols = ["objeto"])
#df_licitacoes2019 = pd.read_csv("C:/Users/Yuri Oliveira/Desktop/TCC_TCE/licitacoes.csv", sep = ';', usecols = ["de_Obs"])
#df_licitacoes2019.columns = ['objeto']
# Append the group description to its specification text.
df['Especificação'] = df.Especificação + " " + df.Descrição
# Lower-case everything.
df.Especificação = df.Especificação.str.lower()
df_licitacoes2019.objeto = df_licitacoes2019.objeto.str.lower()
# Strip accents.
df['Especificação'] = df.Especificação.apply(lambda x: unidecode.unidecode(x))
df_licitacoes2019['objeto'] = df_licitacoes2019.objeto.apply(lambda x: unidecode.unidecode(str(x)))
# Drop special characters and digits (keep letters only).
df['Especificação'] = df.Especificação.apply(lambda x: re.sub('[^a-zA-Z]+', ' ', x))
df_licitacoes2019['objeto'] = df_licitacoes2019.objeto.apply(lambda x: re.sub('[^a-zA-Z]+', ' ', x))
# Remove Portuguese stop words plus domain-specific filler words.
stop = nltk.corpus.stopwords.words('portuguese')
newStopWords = ['adesao','aquisicao','servico','servicos','afins',
'destinada','geral','via','etc','utilizados',
'outros','uso','nao','caso','tais','qualquer',
'neste','compreende','publicos','ate','todos',
'ser','destinacao','prestados','diversos','usos',
'abastecimento','zona','rural','pregao','presencial',
'contratacao','municipio','municipal','empresa',
'atender','necessidades','destinados','registro',
'especializada','conforme','fornecimento','prestacao',
'secretarias','sao','municipio','destinado','joao',
'execucao','forma','grande','tipo','demanda','jose','ata',
'rede','redes','leva','fim','menores','parcela','parcelas',
'populacao','produtos','bem','derivado','derivados',
'pb','aquisicoes']
stop.extend(newStopWords)
df['Especificação'] = df.Especificação.apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
df_licitacoes2019['objeto'] = df_licitacoes2019.objeto.apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
# Tokenizing.
df['tokenized_sents'] = df.apply(lambda row: nltk.word_tokenize(row['Especificação']), axis=1)
df_licitacoes2019['tokenized_sents'] = df_licitacoes2019.apply(lambda row: nltk.word_tokenize(row['objeto']), axis=1)
# Stemming (RSLP); comment out the next three lines to skip stemming.
stemmer = nltk.stem.RSLPStemmer()
df['tokenized_sents'] = df["tokenized_sents"].apply(lambda x: [stemmer.stem(y) for y in x])
df_licitacoes2019['tokenized_sents'] = df_licitacoes2019["tokenized_sents"].apply(lambda x: [stemmer.stem(y) for y in x])
# (Disabled) drop tokens shorter than 3 characters.
#df_licitacoes2019['tokenized_sents'] = df_licitacoes2019.tokenized_sents.apply(lambda x:[x.remove(palavra) if len(palavra) < 3 else palavra for palavra in x])
#df['tokenized_sents'] = df.tokenized_sents.apply(lambda x:[x.remove(palavra) if len(palavra) < 3 else palavra for palavra in x])
# Remove Nones left by the disabled step above.
df_licitacoes2019['tokenized_sents'] = df_licitacoes2019.tokenized_sents.apply(lambda x: list(filter(None, x)))
df['tokenized_sents'] = df.tokenized_sents.apply(lambda x: list(filter(None, x)))
# De-duplicate tokens within each document.
df_licitacoes2019['tokenized_sents'] = df_licitacoes2019.tokenized_sents.apply(lambda x: list(set(x)))
df['tokenized_sents'] = df.tokenized_sents.apply(lambda x: list(set(x)))
# Convert to list-of-token-lists to feed the LSI model.
lista = list(df.tokenized_sents.values)
lista_licitacoes = list(df_licitacoes2019.tokenized_sents.values)
'''
Fim do Tratamento dos dados
'''
'''
LSI
'''
from gensim import corpora
from gensim import models
from gensim import similarities
#https://www.machinelearningplus.com/nlp/gensim-tutorial/#11howtocreatetopicmodelswithlda
# Build the bag-of-words dictionary and corpus from the group token lists.
dct = corpora.Dictionary(lista)
corpus = [dct.doc2bow(line) for line in lista]
# LSI model (100 topics, 100 power iterations).
lsi = models.LsiModel(corpus, id2word=dct, num_topics=100, power_iters = 100)
# Similarity index of all groups in LSI space.
index = similarities.MatrixSimilarity(lsi[corpus])
'''
Fim do LSI
'''
# Debug helper: given a tender's dataframe index, print its 5 most similar groups.
def maisSimilares(index_licitacao):
    """Print the five group descriptions most similar to the given tender."""
    # Project the tender's tokens into LSI space.
    vec_bow = dct.doc2bow(df_licitacoes2019.tokenized_sents[index_licitacao])
    vec_lsi = lsi[vec_bow] # convert the query to LSI space
    # Similarity of the query against every group.
    sims = index[vec_lsi]
    # Show the 5 most similar groups (descending similarity).
    sims = sorted(enumerate(sims), key=lambda item: -item[1])
    for i, s in enumerate(sims[0:5]):
        print(s, df.Descrição[s[0]])
'''
Rotula as Licitacoes
'''
# Used to fill the "classificacao" column of the tender dataframe.
def maiorSimilaridade(licitacao_entrada):
    """Return the best-matching group description, or "outro" if similarity is 0.

    *licitacao_entrada* is a tender's token list.
    """
    # Project the token list into LSI space.
    vec_bow = dct.doc2bow(licitacao_entrada)
    vec_lsi = lsi[vec_bow] # convert the query to LSI space
    # Similarity of the query against every group.
    sims = index[vec_lsi]
    # Sort descending by similarity.
    sims = sorted(enumerate(sims), key=lambda item: -item[1])
    # Return the description of the most similar group.
    #if sims[0][1] > 0.65:
    if sims[0][1] != 0:
        return df.Descrição[sims[0][0]]
    else:
        return "outro"
# Companion to maiorSimilaridade: returns the similarity score itself.
def maiorSimilaridade1(licitacao_entrada):
    """Return the similarity of the best-matching group for a token list."""
    # Project the token list into LSI space.
    vec_bow = dct.doc2bow(licitacao_entrada)
    vec_lsi = lsi[vec_bow] # convert the query to LSI space
    # Similarity of the query against every group.
    sims = index[vec_lsi]
    # Sort descending by similarity.
    sims = sorted(enumerate(sims), key=lambda item: -item[1])
    # Return the highest similarity value.
    return sims[0][1]
# Label every tender with its most similar group and that similarity score.
df_licitacoes2019['classificacao'] = df_licitacoes2019.apply(lambda row: maiorSimilaridade(row['tokenized_sents']), axis=1)
df_licitacoes2019['similaridade'] = df_licitacoes2019.apply(lambda row: maiorSimilaridade1(row['tokenized_sents']), axis=1)
freq_grupos = df_licitacoes2019.classificacao.value_counts()
# Top-10 groups selected for the evaluation survey shown in the results:
# the survey measures the algorithm's hit rate, and the answers feed a
# confusion matrix. Only confident matches (similarity > 0.65) are kept.
df_ga = df_licitacoes2019[(df_licitacoes2019.classificacao == 'GÊNEROS ALIMENTÍCIOS') & (df_licitacoes2019.similaridade > 0.65)]
df_lv = df_licitacoes2019[(df_licitacoes2019.classificacao == 'LOCAÇÃO DE VEÍCULOS') & (df_licitacoes2019.similaridade > 0.65)]
df_li = df_licitacoes2019[(df_licitacoes2019.classificacao == 'LOCAÇÃO DE IMÓVEIS') & (df_licitacoes2019.similaridade > 0.65)]
df_c = df_licitacoes2019[(df_licitacoes2019.classificacao == 'CONSULTORIA') & (df_licitacoes2019.similaridade > 0.65)]
df_o = df_licitacoes2019[(df_licitacoes2019.classificacao == 'OBRAS') & (df_licitacoes2019.similaridade > 0.65)]
df_cp = df_licitacoes2019[(df_licitacoes2019.classificacao == 'FORNECIMENTO DE ÁGUA POTÁVEL EM CAMINHÃO-PIPA') & (df_licitacoes2019.similaridade > 0.65)]
df_sa = df_licitacoes2019[(df_licitacoes2019.classificacao == 'SERVIÇOS PRESTADOS POR PROFISSIONAL DO SETOR ARTÍSTICO') & (df_licitacoes2019.similaridade > 0.65)]
df_st = df_licitacoes2019[(df_licitacoes2019.classificacao == 'SERVIÇO DE MANUTENÇÃO E SUPORTE TÉCNICO DE EQUIPAMENTOS DE INFORMÁTICA') & (df_licitacoes2019.similaridade > 0.65)]
df_tp = df_licitacoes2019[(df_licitacoes2019.classificacao == 'SERVIÇOS DE TRANSPORTE DE PASSAGEIROS') & (df_licitacoes2019.similaridade > 0.65)]
df_cl = df_licitacoes2019[(df_licitacoes2019.classificacao == 'COMBUSTÍVEIS E LUBRIFICANTES') & (df_licitacoes2019.similaridade > 0.65)]
# Sample 50 tenders from each group for the survey.
df_pesquisa = pd.concat([df_ga.sample(50), df_lv.sample(50), df_li.sample(50), df_c.sample(50), df_o.sample(50),
df_cp.sample(50), df_sa.sample(50), df_st.sample(50), df_tp.sample(50), df_cl.sample(50)])
# Free the per-group frames.
del [[df_ga,df_lv,df_li,df_c,df_o,df_cp,df_sa,df_st,df_tp,df_cl]]
gc.collect()
# Reference frames used to attach the original text and group codes.
df_gref = pd.read_excel("C:/Users/Yuri Oliveira/Desktop/TCC_TCE/tabela_codigo_do_objeto.xls", sep = ';')
df_lref = pd.read_csv("C:/Users/Yuri Oliveira/Desktop/TCC_TCE/Licitacoes_2019.csv", encoding = "ISO-8859-1", sep = ';', usecols = ["objeto"])
df_gref.columns = ['codigo', 'nome_grupo', 'especificacao']
#joining dataframes
df_pesquisa = pd.merge(df_pesquisa, df_lref, left_index=True, right_index=True)
df_pesquisa = pd.merge(df_pesquisa, df_gref, left_on = 'classificacao', right_on = 'nome_grupo')
# Keep only the columns used by the survey.
cols = [4,6,7]
df_pesquisa = df_pesquisa[df_pesquisa.columns[cols]]
# Write the survey dataframe to CSV.
df_pesquisa.to_csv(r'C:/Users/Yuri Oliveira/Desktop/TCC_TCE/dados_pesquisa.csv', index = False, sep = ';')
'''
Fim de Rotula as Licitacoes
'''
'''
Testando Licitacoes
'''
#freq_grupos = df_pesquisa.classificacao.value_counts()
#mostra todas as classificacoes de um determinado tipo
#df_testando = df_licitacoes2019[df_licitacoes2019['classificacao'].str.contains('MATERIAL PEDAGÓGICO E DE RECREAÇÃO')]
#pesquisa quais sao os 5 grupos mais relevantes de uma determinada licitacao(pegar indice do dataframe)
#maisSimilares(3)
#Conta a frequencia de todas as palavras do dataframe
#df_pesquisa["freq"] = df_licitacoes2019.tokenized_sents.apply(lambda x: ' '.join(x))
#freq = df_pesquisa.freq.str.split(expand=True).stack().value_counts()
'''
Fim de Testando Licitacoes
'''
'''
Fim do Rotula as Licitacoes
'''
#pesquisar uma string no dataframe
#df[df['de_Obs'].str.contains('oi celular')]
'''
#Conta a frequencia de todas as palavras do dataframe
df["freq"] = df.tokenized_sents.apply(lambda x: ' '.join(x))
freq = df.freq.str.split(expand=True).stack().value_counts()
#Frequencia contratacao/servico
df_contratacao = df[df['de_Obs'].str.contains('contratacao|servico')]
df["freq"] = df.tokenized_sents.apply(lambda x: ' '.join(x))
freq_contratacao = df_contratacao.freq.str.split(expand=True).stack().value_counts()
df_locServ = df_contratacao[df_contratacao['de_Obs'].str.contains('locacao')]
'''
'''
df['de_Obs'] = df['de_Obs'].apply(lambda x: nlp(x))
tokens = []
lemma = []
pos = []
for doc in nlp.pipe(df['de_Obs'].astype('unicode').values, n_threads=3):
if doc.is_parsed:
tokens.append([n.text for n in doc])
lemma.append([n.lemma_ for n in doc])
pos.append([n.pos_ for n in doc])
else:
# We want to make sure that the lists of parsed results have the
# same number of entries of the original Dataframe, so add some blanks in case the parse fails
tokens.append(None)
lemma.append(None)
pos.append(None)
df['species_tokens'] = tokens
df['species_lemma'] = lemma
df['species_pos'] = pos
'''
'''
#Lemmatization
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
#Tokenizing the text
df_licitacoes2019['tokenized_sents'] = df.apply(lambda row: nltk.word_tokenize(row['df_without_stopwords']), axis=1)
#stemming the text (demora pra carai)
stemmer = nltk.stem.RSLPStemmer()
df_licitacoes2019['stemmed'] = df_licitacoes2019["tokenized_sents"].apply(lambda x: [stemmer.stem(y) for y in x])
from nltk.corpus import wordnet
syns = wordnet.synsets("program")
print(syns[0].name())
print(syns[0].lemmas()[0].name())
print(syns[0].definition())
print(syns[0].examples())
#http://wordnet.pt/
#https://babelnet.org/guide
#http://compling.hss.ntu.edu.sg/omw/summx.html
#http://ontopt.dei.uc.pt/index.php?sec=consultar
#http://www.clul.ulisboa.pt/en/
#http://multiwordnet.fbk.eu/online/multiwordnet.php
#https://github.com/own-pt/openWordnet-PT/wiki
#http://babelscape.com/doc/pythondoc/pybabelnet.html
#https://sites.google.com/site/renatocorrea/temas-de-interesse/processamento-de-linguagem-natural
#https://imasters.com.br/back-end/aprendendo-sobre-web-scraping-em-python-utilizando-beautifulsoup
'''
| true
|
4793ee3b0e6ded2b9751bb2e5a1a73e87f6afc4a
|
Python
|
bugrahan-git/ML-IAGFP
|
/Transform.py
|
UTF-8
| 2,098
| 3.15625
| 3
|
[] |
no_license
|
import random
import cv2
import imgaug.augmenters as iaa
import numpy as np
"""Class to transform images with features random_rotation, random_noise, horizontal_flip"""
class Transform:
    """Randomly augment images with rotation, flipping, noise, cropping and shearing."""

    def __init__(self):
        self.ctr = 0  # counter used to build unique output file names
        # Maps a transformation name to the bound method implementing it.
        self.available_transformations = {
            'rotate': self.random_rotation,
            'horizontal_flip': self.horizontal_flip,
            'noise': self.add_noise,
            'crop': self.crop,
            'shear': self.shear,
        }

    def random_rotation(self, image_array: np.ndarray):
        """Rotate by an angle drawn from a range with random endpoints in [-90,-1] and [1,179]."""
        rotate = iaa.Affine(rotate=(random.randint(-90, -1), random.randint(1, 179)))
        return rotate.augment_image(image_array)

    def add_noise(self, image_array: np.ndarray):
        """Add additive Gaussian noise (loc=10, scale=20)."""
        gaussian_noise = iaa.AdditiveGaussianNoise(10, 20)
        return gaussian_noise.augment_image(image_array)

    def horizontal_flip(self, image_array: np.ndarray):
        """Mirror the image horizontally (always, p=1.0)."""
        flip_hr = iaa.Fliplr(p=1.0)
        return flip_hr.augment_image(image_array)

    def crop(self, image_array: np.ndarray):
        """Crop a random fraction (0-30%) from the sides."""
        crop = iaa.Crop(percent=(0, 0.3))
        return crop.augment_image(image_array)

    def shear(self, image_array: np.ndarray):
        """Shear by a random amount in [0, 40]."""
        shear = iaa.Affine(shear=(0, 40))
        return shear.augment_image(image_array)

    def transform_image(self, image_to_transform, folder_path):
        """Apply a random chain of transformations, save the result as a JPEG, return it.

        Bug fixes vs. the original:
        * transformations are now chained (each applied to the previous
          result) — before, every iteration transformed the ORIGINAL image,
          so only the last transformation survived;
        * exactly `num_transformations_to_apply` transformations run
          (the old `while num <= target` loop ran one extra).
        """
        num_transformations_to_apply = random.randint(1, len(self.available_transformations))
        transformed_image = image_to_transform
        for _ in range(num_transformations_to_apply):
            # choose a random transformation to apply for a single image
            key = random.choice(list(self.available_transformations))
            transformed_image = self.available_transformations[key](transformed_image)
        new_file_path = '%s/augmented_image_%s.jpg' % (folder_path, self.ctr)
        # write image to the disk
        cv2.imwrite(new_file_path, transformed_image)
        self.ctr += 1
        return transformed_image
| true
|
9c2201971ce043cb1fcad12027a848a2900f20e2
|
Python
|
uu64/leetcode
|
/solution/python3/83.remove-duplicates-from-sorted-list.py
|
UTF-8
| 656
| 2.9375
| 3
|
[] |
no_license
|
#
# @lc app=leetcode id=83 lang=python3
#
# [83] Remove Duplicates from Sorted List
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Return a copy of the sorted list `head` with duplicate values removed.

        Builds a fresh list behind a sentinel node whose value (-101) is
        below the problem's minimum, appending a node only when the value
        strictly increases.
        """
        sentinel = ListNode(-101)
        tail = sentinel
        node = head
        while node:
            if tail.val < node.val:
                tail.next = ListNode(node.val)
                tail = tail.next
            node = node.next
        return sentinel.next
# @lc code=end
| true
|
edba730b323f03d97d414b377cea5d8b72fc10e1
|
Python
|
datasigntist/mlforall
|
/scripts/iris_python_Script_Chapter_6.py
|
UTF-8
| 1,175
| 3.25
| 3
|
[] |
no_license
|
#
# Created : 6-Dec-2016
#
import numpy as np
import matplotlib.pyplot as plt
####Script Part 6.1
# Load the iris dataset and print its metadata plus the first sample.
from sklearn import datasets
iris = datasets.load_iris()
print(iris.feature_names)
X = iris.data
print(iris.target_names)
y = iris.target
print('Shape of X %d rows %d columns'%X.shape)
print(X[0],iris.target_names[y[0]])
#########################################
####Script Part 6.2
def sigmoid(z):
    """Logistic sigmoid, 1 / (1 + e^(-z)); accepts scalars and numpy arrays."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
# Plot the sigmoid over [-10, 10).
dataSet = np.arange(-10.0,10.0,0.1)
sigmoiddataSet = sigmoid(dataSet)
plt.plot(dataSet,sigmoiddataSet)
plt.show()
#########################################
####Script Part 6.3
# Plot -log(h): the logistic-loss curve for the positive class.
dataSet = np.arange(0.0,1.0,0.01)
plt.plot(dataSet,-np.log(dataSet))
plt.show()
#########################################
####Script Part 6.4
# Plot -log(1-h): the logistic-loss curve for the negative class.
dataSet = np.arange(0.0,1.0,0.01)
plt.plot(dataSet,-np.log(1-dataSet))
plt.show()
#########################################
####Script Part 6.5
# Keep only classes 0 and 1 (drop class 2) for a binary logistic regression.
X = X[y!=2,:]
y = y[y!=2]
from sklearn.linear_model import LogisticRegression
logistic = LogisticRegression()
logistic.fit(X,y)
# Bug fix: predict_proba requires a 2-D array; X[1,:] is 1-D and raises an
# error in modern scikit-learn.  Slicing with 1:2 keeps the row 2-D.
print('Predicted value of %s is %s'%(X[1,:],iris.target_names[logistic.predict_proba(X[1:2,:]).argmax()]))
#########################################
| true
|
02894b03c4d4b293759a0ab7022c445c086ba562
|
Python
|
anthonywritescode/aoc2018
|
/day22/part2.py
|
UTF-8
| 3,380
| 2.59375
| 3
|
[] |
no_license
|
import argparse
import enum
import functools
import sys
from typing import Dict
from typing import Generator
from typing import Set
from typing import Tuple
import pytest
from support import timing
class Tool(enum.IntEnum):
TORCH = 1
CLIMBING_GEAR = 2
NOTHING = 3
REGION_ROCKY = 0
REGION_WET = 1
REGION_NARROW = 2
REGIONS_TO_TOOLS = {
REGION_ROCKY: {Tool.TORCH, Tool.CLIMBING_GEAR},
REGION_WET: {Tool.CLIMBING_GEAR, Tool.NOTHING},
REGION_NARROW: {Tool.TORCH, Tool.NOTHING},
}
def compute(s: str) -> int:
_, depth_s, _, coord_s = s.split()
coord_x_s, coord_y_s = coord_s.split(',')
depth, coord_x, coord_y = int(depth_s), int(coord_x_s), int(coord_y_s)
@functools.lru_cache(maxsize=None)
def _erosion_level(x: int, y: int) -> int:
return (_geologic_index(x, y) + depth) % 20183
@functools.lru_cache(maxsize=None)
def _geologic_index(x: int, y: int) -> int:
if y == 0:
return x * 16807
elif x == 0:
return y * 48271
elif (x, y) == (coord_x, coord_y):
return 0
else:
return _erosion_level(x - 1, y) * _erosion_level(x, y - 1)
def _region(x: int, y: int) -> int:
return _erosion_level(x, y) % 3
start = (0, 0, Tool.TORCH)
dest = (coord_x, coord_y, Tool.TORCH)
paths: Set[Tuple[int, int, Tool]] = {start}
times: Dict[Tuple[int, int, Tool], int] = {start: 0}
bad_upper_bound = coord_x * 8 + coord_y * 8
def _legal_and_better(cand: Tuple[int, int, Tool], time: int) -> bool:
x, y, tool = cand
return (
# in bound and valid tool for the region
x >= 0 and y >= 0 and tool in REGIONS_TO_TOOLS[_region(x, y)] and
# better time if we've previously gone here
time < times.get(cand, sys.maxsize) and
# termination pruning
time < times.get(dest, bad_upper_bound)
)
def _next(
x: int, y: int, tool: Tool,
) -> Generator[Tuple[int, int, Tool], None, None]:
time = times[(x, y, tool)]
region_type = _region(x, y)
# try switching tool first
cand_time = time + 7
cand_tool, = REGIONS_TO_TOOLS[region_type] - {tool}
cand = (x, y, cand_tool)
if _legal_and_better(cand, cand_time):
times[cand] = cand_time
yield cand
# try moving next
for x_c, y_c in ((-1, 0), (1, 0), (0, -1), (0, 1)):
cand_time = time + 1
cand = (x + x_c, y + y_c, tool)
if _legal_and_better(cand, cand_time):
times[cand] = cand_time
yield cand
while paths:
paths = {
new_path
for cand_x, cand_y, tool in paths
for new_path in _next(cand_x, cand_y, tool)
}
return times[dest]
@pytest.mark.parametrize(
('input_s', 'expected'),
(
(
'depth: 510\n'
'target: 10,10\n',
45,
),
),
)
def test(input_s: str, expected: int) -> None:
assert compute(input_s) == expected
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('data_file')
args = parser.parse_args()
with open(args.data_file) as f, timing():
print(compute(f.read()))
return 0
if __name__ == '__main__':
exit(main())
| true
|
f92d9488797c04e26fc142721f5dbebc5e42ce48
|
Python
|
citizen-stig/coverage-jinja-plugin
|
/jinja_coverage/plugin.py
|
UTF-8
| 2,415
| 2.625
| 3
|
[] |
no_license
|
# -*- encoding: utf-8 -*-
"""
Coverage Plugin for Jinja2 Template Engine
"""
import coverage.plugin
debug = True
class JinjaPlugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith('.html'):
return FileTracer(filename)
class FileTracer(coverage.plugin.FileTracer):
def __init__(self, filename):
self.filename = filename
def source_filename(self):
return self.filename
def line_number_range(self, frame):
template = frame.f_globals.get('__jinja_template__')
if template is None:
return -1, -1
lines_map = get_line_map(template)
if not lines_map:
return 1, get_template_lines_number(template)
keys = sorted(list(lines_map.keys()))
smallest = keys[0]
largest = keys[-1]
if frame.f_lineno < smallest:
if debug:
print('f_line no {0} < smallest {1}, return 1, {2}'.format(
frame.f_lineno, smallest, lines_map[smallest] - 1))
return 1, lines_map[smallest] - 1
elif frame.f_lineno > largest:
start = lines_map[largest] + 1
end = get_template_lines_number(template)
if debug:
print('f_line {0} > largest {2}, return {2}, {3}'.format(
frame.f_lineno, largest, start, end))
return start, end
elif smallest <= frame.f_lineno < largest:
if frame.f_lineno in lines_map:
start = lines_map[frame.f_lineno]
next_key_index = keys.index(frame.f_lineno) + 1
end = lines_map[keys[next_key_index]] - 1
if debug:
print('f_line {0}, map {1}, return {2}, {3}'.format(
frame.f_lineno, lines_map, start, end))
return start, end
return -1, -1
def get_template_lines_number(template):
with open(template.filename) as template_file:
lines_count = sum(1 for _ in template_file)
return lines_count
def get_line_map(template):
lines_map = {}
if template._debug_info:
# _debug_info = '7=8&9=17'
for pair in template._debug_info.split('&'):
original, compiled = pair.split('=')
original, compiled = int(original), int(compiled)
lines_map[compiled] = original
return lines_map
| true
|
63a7f36dbcba8e8b41625109d0cd11b75d66d55e
|
Python
|
psusmit/algorithms
|
/algorithms/stringOps/palindrome.py
|
UTF-8
| 133
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#@author susmit
#program to check palindrone in python for strings and integer numbers
def palindrome():
return s == s[::-1]
| true
|
a184a1599eccc996bdf9b6edd773d9fd01bdd3a0
|
Python
|
gilsonsantos03/PythonWebCoursera
|
/semana2/regex.py
|
UTF-8
| 842
| 3.125
| 3
|
[] |
no_license
|
import re
string = 'oi eu sou o 1 goku e tambem O 3 goku'
y = re.findall('[aeiou]+',string)
z = re.findall('[0-9]+',string)
print(y)
print(z)
#############################################333
string2 = 'From: Using the : character'
y2 = re.findall('^F.+:', string2)
print(y2)
correct = re.findall('^F.+?:', string2)
print(correct)
################################################
string3 = 'From gilsonlopes1921@gmail.com sat jan 2102-09-21'
y3 = re.findall('\S+@\S+', string3)
print(y3)
y4 = re.findall('^From (\S+@\S+)', string3)
##os parenteses indicam onde comeca e onde deve parar a string a ser extraida
print(y4)
y5 = re.findall('@([^ ]*)', string3)
print(y5)
y5 = re.findall('^From .*@([^ ]*)', string3)
#####################################3
string5 = 'isso custou apenas $10.00 for cookies'
y43 = re.findall('\$[0-9.]+', string5)
print(y43)
| true
|
4c2030a379e4f3ca246ecb56f3bfaccf71fe825f
|
Python
|
DOGEE7/Python
|
/5高级特性.py
|
UTF-8
| 4,800
| 4
| 4
|
[] |
no_license
|
# ======================切片Slice=========================
L = ['a', 'b', 'c', 'd', 'f', 'e', 'g', 'f']
L1 = list(range(17))
r = []
n = 5
for i in range(n):
r.append(L[i])
print(r) # ['a', 'b', 'c', 'd', 'f']
print(L1[:]) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
print(L[1:3]) # ['b', 'c']
print(L[-2:]) # ['g', 'f']
print(L[-2:-1]) # ['g']
print(L[:5]) # ['a', 'b', 'c', 'd', 'f']
print(L[:6:2]) # ['a', 'c', 'f']前十个,每两个取一个
print(L[::3]) # ['a', 'd', 'g']所有数,每3个取一个
print((0, 1, 2, 3, 4, 5, 6)[:4]) # (0, 1, 2, 3)
print('ABCDEFGH'[::3]) # ADG
# 练习 利用切片操作,实现一个trim()函数,去除字符串首尾的*字符
def trim(sentence):
n = len(sentence)
a = 0
b = n
for i in range(n):
if sentence[i] == '*':
a = i+1
else:
break
j = n - 1
while sentence[j] == '*':
j -= 1
b = j + 1
return sentence[a:b]
l = '***This is Python!****'
print(trim(l))
# =====================迭代=======================
d = {'city': 'Xiamen', 'college': 'HQU', 'age': 20, 'profession': 'Network Engineer'}
for k, v in d.items():
print(str(k) + ':' + str(v))
# city:Xiamen
# college:HQU
# age:20
# profession:Network Engineer
from collections import Iterable,Iterator
print(isinstance('abc', Iterable)) # str是否可迭代 True
print(isinstance([1, 2, 3, 4], Iterable)) # list是否可迭代 True
print(isinstance(123, Iterable)) # False
for i, value in enumerate(['A', 'B', 'C']):
print(i, value)
# city:Xiamen
# college:HQU
# age:20
# profession:Network Engineer
for x, y in [(1, 1), (2, 4), (3, 9)]:
print(x, y)
# 1 1
# 2 4
# 3 9
# 练习
def findMinAndMax(L):
max = L[0]
min = L[0]
for index, value in enumerate(L):
if min > L[index]:
min = L[index]
# print('min=' + str(min))
if max < L[index]:
max = L[index]
# print('max=' + str(max))
T = (min, max)
print(T)
L = [55, 96, 22, 57, 45, 36, 16, 20]
findMinAndMax(L) # (16, 96)
# ========================列表生成式=====================
print(list(range(1, 11))) # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print([x * x for x in range(1, 11)]) # [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print([x * x for x in range(1, 11) if x % 2 == 0]) # [4, 16, 36, 64, 100]
print([m + n for m in 'ABC' for n in 'XYZ']) # 两层循环 ['AX', 'AY', 'AZ', 'BX', 'BY', 'BZ', 'CX', 'CY', 'CZ']
d = {'x': 'A', 'y': 'B', 'z': 'C'}
print([k + '=' + v for k, v in d.items()]) # ['x=A', 'y=B', 'z=C']
Li = ['hello', 'world', 'imb', 'apple']
print([s.upper() for s in Li]) # ['HELLO', 'WORLD', 'IMB', 'APPLE']
L1 = ['hello', 'world', 'apple', 18, None]
[s1.upper() for s1 in L1 if isinstance(s1, str)] # ['HELLO', 'WORLD', 'IMB', 'APPLE']
# ============================生成器==============================
g = (x * x for x in range(10))
for n in g:
print(n)
# 0
# 1
# 4
# 9
# 16
# 25
# 36
# 49
# 64
# 81
# 斐波拉契数列
def fib(max):
n, a, b = 0, 0, 1
while n < max:
# print(b)
yield (b)
a, b = b, a + b
n = n + 1
return 'done'
print(fib(10)) # <generator object fib at 0x0339BB30>
for num in fib(6):
print(num)
# 1
# 1
# 2
# 3
# 5
# 8
# 练习:杨辉三角
def triangle(n):
L1 = [1]
yield L1
for i in range(n - 1):
L2 = [1]
# if n > 2:
for j in range(i):
add = L1[j] + L1[j + 1]
L2.append(add)
L2.append(1)
yield L2
L1 = L2
list1 = triangle(6)
for list_1 in list1:
print(list_1)
# [1]
# [1, 1]
# [1, 2, 1]
# [1, 3, 3, 1]
# [1, 4, 6, 4, 1]
# [1, 5, 10, 10, 5, 1]
# =========================迭代器====================
from collections import Iterator, Iterable
# 这些可以直接作用于for循环的对象统称为可迭代对象:Iterable。
# 可以使用isinstance()判断一个对象是否是Iterable对象
print(isinstance([], Iterable)) # True
print(isinstance({}, Iterable)) # True
print(isinstance('abc', Iterable)) # True
print(isinstance((x for x in range(10)), Iterable)) # True
print(isinstance(100, Iterable)) # False
# 使用isinstance()判断一个对象是否是Iterator(迭代器)对象
print(isinstance((x for x in range(10)), Iterator)) # True
print(isinstance([], Iterator)) # False
print({}, Iterator) # {} <class 'collections.abc.Iterator'>
print('abc', Iterator) # abc <class 'collections.abc.Iterator'>
# 生成器都是Iterator对象,但list、dict、str虽然是Iterable,却不是Iterator。
# 把list、dict、str等Iterable变成Iterator可以使用iter()函数
print(isinstance(iter([]), Iterator)) # True
print(isinstance(iter({}), Iterator)) # True
print(isinstance(iter('abc'), Iterator)) # True
| true
|
f5c5c2ac690dff9e6a5ba11f3eb4bccbc0f0f124
|
Python
|
MifengbushiMifeng/pyanalyze
|
/multi_process/my_except.py
|
UTF-8
| 311
| 2.953125
| 3
|
[] |
no_license
|
def base_exception():
    """Demonstrate that middle_func() absorbs the exception: both prints run."""
    print('base_exception start')
    middle_func()  # raises internally but handles it, so execution continues
    print('base_exception finish')
def middle_func():
    """Call raise_exception() and swallow its failure with a message."""
    try:
        raise_exception()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; raise_exception() only raises IOError.
        print('An exception occurred!')
def raise_exception():
    """Unconditionally raise IOError so callers can exercise their handlers."""
    raise IOError()
if __name__ == '__main__':
    # Run the demo when executed as a script.
    base_exception()
| true
|
a12cc4fa4fb964311fdf22d7c117f1c0c72b67ce
|
Python
|
AlexMabry/aoc20
|
/day01/d1a.py
|
UTF-8
| 190
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
# Read one integer per line and print the product of two entries summing to 2020.
# Fixes: the input file is now closed (was leaked via a bare open()), and the
# redundant identity set comprehension is replaced with set().
with open('d1in.txt') as f:
    numbers = [int(n) for n in f.read().splitlines()]
numberSet = set(numbers)
for n in numberSet:
    complement = 2020 - n
    if complement in numberSet:
        # NOTE(review): when n == 1010 this pairs the entry with itself even
        # if 1010 appears only once in the input -- confirm the puzzle data.
        print(complement * n)
        break
| true
|
15990e908c663a1ebeece9e8c264bfcaef3c0c0a
|
Python
|
markvassell/Summer_2016
|
/corn_model.py
|
UTF-8
| 1,272
| 3.0625
| 3
|
[] |
no_license
|
from csv import DictReader
import pandas as pd
import scipy as sy
import matplotlib.pyplot as plt
import numpy as np
# Projection window: inclusive range of years the model reports on.
starting_year = 2014
ending_year = 2023
years = range(starting_year, ending_year + 1)
def main():
    """Read data.csv and print corn price/yield projections.

    For every record whose YEAR falls inside the `years` window, the current
    expectations are recomputed from the rows read so far and printed
    (cemp/cey look at the most recently appended row).
    Exits with the OS error code if the file cannot be read.
    """
    # Removed an unused local (`count = 0`) from the original.
    # name of the file
    file = "data.csv"
    all_rows = []
    try:
        with open(file) as csv_file:
            basedata = DictReader(csv_file)
            for row in basedata:
                all_rows.append(row)
                if(int(row['YEAR']) in years):
                    print("Corn expected market price: ", cemp(all_rows))
                    print("Corn expected yield: ", cey(all_rows))
    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
        exit(e.errno)
    except ValueError:
        print("ValueError")
# Nominal reference price (Deflator)
def nfp(rows):
    # TODO: stub -- no deflator computation implemented yet; only prints.
    print("hello")
# Corn expect market price
def cemp(rows):
    """Corn expected market price: prPPCO minus prEPCOmkt of the most recent row."""
    latest = rows[-1]
    return float(latest['prPPCO']) - float(latest['prEPCOmkt'])
# Corn expect yield
def cey(rows):
    """Corn expected yield: linear time trend (base year 1900) minus crYECOto of the latest row."""
    latest = rows[-1]
    trend = 0.107815751601678 * (int(latest['YEAR']) - 1900)
    return -2.28290274642977 + trend - float(latest['crYECOto'])
# Script entry point (runs on import too -- NOTE(review): consider a __main__ guard).
main()
| true
|
d049ced202c1984920dd4a2d22469676f66c1476
|
Python
|
ivivan/Imputation_Review
|
/paper_related/change_gap_size.py
|
UTF-8
| 1,601
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, RegularPolygon
from matplotlib.path import Path
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
from matplotlib.spines import Spine
from matplotlib.transforms import Affine2D
if __name__ == '__main__':
    # NOTE(review): backslash path is Windows-specific and non-raw; prefer 'data/plot_sample.csv'.
    data_source_path = 'data\plot_sample.csv'
    data_source = pd.read_csv(data_source_path,header=0)
    # Transpose so each imputation method becomes a column.
    data_source_transposed = data_source.T
    columns_name = ['Dual-SSIM','SSIM','BRITS','M-RNN','EM','MICE','Mean','LOCF','Linear']
    data_source_transposed.columns = columns_name
    print(data_source_transposed)
    # These are the "Tableau 20" colors as RGB.
    tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                 (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
                 (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                 (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                 (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
    # Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
    # NOTE(review): tableau20 is computed but never passed to the plot below.
    for i in range(len(tableau20)):
        r, g, b = tableau20[i]
        tableau20[i] = (r / 255., g / 255., b / 255.)
    # Line plot of scaled RMSE per method versus missing-data size.
    lines = data_source_transposed.plot.line(figsize=(6,8))
    lines.set_ylabel("Scaled RMSE",size=18)
    lines.set_xlabel("Missing Data Size",size=18)
    lines.legend(bbox_to_anchor=(1.0, 1.0),fontsize=18)
    plt.show()
| true
|
005657837012aeb17f24ff1d723c2e9dfd41521d
|
Python
|
XuejieSong523920/Artificial_Intelligence_Course_Code
|
/prob1.py
|
UTF-8
| 14,417
| 2.921875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 16:05:06 2019
@author: Xuejie Song
"""
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_iris
from sklearn.datasets import load_wine
from sklearn.datasets import load_digits
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import warnings
warnings.filterwarnings("ignore")
# Load breast-cancer data, attach the label as the last column, and shuffle rows.
breat_X,breast_y = load_breast_cancer(return_X_y=True)
breastcancer = np.column_stack((breat_X,breast_y))
breastcancer = shuffle(breastcancer)
def split_folds(data):
    """Split a 2-D array (features, label in the last column) into 5 CV folds.

    Returns a list of 5 dicts, each with 'train_x'/'train_y' for the rows
    outside the fold and 'valid_x'/'valid_y' for the held-out rows.

    Bug fix: when data.shape[0] is not a multiple of 5, the remainder rows
    previously never appeared in ANY validation set (they were part of every
    training set).  The last fold now absorbs them, so every sample is
    validated exactly once.
    """
    nFolds = 5
    folds = []
    numSamples = data.shape[0]
    numLeaveOutPerFold = numSamples // nFolds
    for i in range(nFolds):
        startInd = i * numLeaveOutPerFold
        if i == nFolds - 1:
            endInd = numSamples  # last fold takes the remainder rows too
        else:
            endInd = (i + 1) * numLeaveOutPerFold
        frontPart = data[:startInd, :]
        midPart = data[startInd : endInd, :]
        rearPart = data[endInd:, :]
        foldData = np.concatenate([frontPart, rearPart], axis=0)
        foldInfo = {
            'train_x' : foldData[:, :-1],
            'train_y' : foldData[:, -1],
            'valid_x' : midPart[:, :-1],
            'valid_y' : midPart[:, -1]
        }
        folds.append(foldInfo)
    return folds
# 5-fold CV splits of the shuffled breast-cancer array.
breastcancer_split_folds = split_folds(breastcancer)
def error_rate_for_logist(data, c):
    """Mean and std of the 5-fold CV misclassification rate for logistic
    regression (liblinear solver) with inverse regularization strength c."""
    fold_errors = []
    for fold in data[:5]:
        scaler = StandardScaler()
        scaler.fit(fold['train_x'])
        train_std = scaler.transform(fold['train_x'])
        valid_std = scaler.transform(fold['valid_x'])
        model = LogisticRegression(random_state=0, C=c, solver='liblinear')
        model.fit(train_std, fold['train_y'])
        predicted = model.predict(valid_std)
        fold_errors.append(sum(fold['valid_y'] != predicted) / len(fold['valid_y']))
    return np.mean(fold_errors), np.std(fold_errors)
#plot mean classification error rate in breast cancer data set
# Sweep C over 9 decades and plot mean +/- std CV error vs log10(C).
alter_C = [0.00001,0.0001,0.001,0.01,0.1,1,10,100,1000]
log_alter_C =[math.log(i,10) for i in alter_C]
result = [error_rate_for_logist(breastcancer_split_folds,i) for i in alter_C]
result = pd.DataFrame(result)
ero = result.iloc[:,0]
std = result.iloc[:,1]
plt.errorbar(log_alter_C,ero,std,color='blue')
plt.title('breast cancer data set', fontsize=20)
plt.xlabel('log(C) in logistic regression')
plt.ylabel('error rate')
plt.show()
def error_rate_for_perceptron(data, a):
    """Mean and std of the 5-fold CV misclassification rate for an
    L2-penalized perceptron with regularization strength a."""
    fold_errors = []
    for fold in data[:5]:
        scaler = StandardScaler()
        scaler.fit(fold['train_x'])
        train_std = scaler.transform(fold['train_x'])
        valid_std = scaler.transform(fold['valid_x'])
        model = Perceptron(penalty='l2', alpha=a, eta0=0.1, random_state=0)
        model.fit(train_std, fold['train_y'])
        predicted = model.predict(valid_std)
        fold_errors.append(sum(fold['valid_y'] != predicted) / len(fold['valid_y']))
    return np.mean(fold_errors), np.std(fold_errors)
# The raw-alpha axis is hard to read when alpha is very small,
# so plot error rate against log10(alpha) instead.
alter_a = [0.00001,0.0001,0.001,0.01,0.1,1,10,100,1000]
log_alter_a = [math.log(i,10) for i in alter_a]
result_pe = [error_rate_for_perceptron(breastcancer_split_folds,i) for i in alter_a]
result_pe = pd.DataFrame(result_pe)
ero_pe = result_pe.iloc[:,0]
std_pe = result_pe.iloc[:,1]
plt.errorbar(log_alter_a,ero_pe,std_pe,color='red')
plt.title('breast cancer data set', fontsize=20)
plt.xlabel('log(alpha) in perceptron')
plt.ylabel('error rate')
plt.show()
def error_rate_for_linear_SVM(data, c):
    """Mean and std of the 5-fold CV misclassification rate for a linear SVM
    with regularization parameter c."""
    fold_errors = []
    for fold in data[:5]:
        scaler = StandardScaler()
        scaler.fit(fold['train_x'])
        train_std = scaler.transform(fold['train_x'])
        valid_std = scaler.transform(fold['valid_x'])
        model = LinearSVC(C=c, random_state=0, tol=1e-5, max_iter=1000000)
        model.fit(train_std, fold['train_y'])
        predicted = model.predict(valid_std)
        fold_errors.append(sum(fold['valid_y'] != predicted) / len(fold['valid_y']))
    return np.mean(fold_errors), np.std(fold_errors)
# Sweep C for the linear SVM and plot mean +/- std CV error vs log10(C).
alter_C_SVM = [0.00001,0.0001,0.001,0.01,0.1,1,10,100,1000]
log_alter_C_SVM = [math.log(i,10) for i in alter_C_SVM]
result_SVM = [error_rate_for_linear_SVM(breastcancer_split_folds,i) for i in alter_C_SVM ]
result_SVM = pd.DataFrame(result_SVM)
ero_SVM = result_SVM.iloc[:,0]
std_SVM = result_SVM.iloc[:,1]
plt.errorbar(log_alter_C_SVM,ero_SVM,std_SVM,color='green')
plt.title('breast cancer data set', fontsize=20)
plt.xlabel('log(C) in linear SVM')
plt.ylabel('error rate')
plt.show()
def error_rate_for_KNN(data, k):
    """Mean and std of the 5-fold CV misclassification rate for k-nearest
    neighbors with k neighbors."""
    fold_errors = []
    for fold in data[:5]:
        scaler = StandardScaler()
        scaler.fit(fold['train_x'])
        train_std = scaler.transform(fold['train_x'])
        valid_std = scaler.transform(fold['valid_x'])
        model = KNeighborsClassifier(n_neighbors=k)
        model.fit(train_std, fold['train_y'])
        predicted = model.predict(valid_std)
        fold_errors.append(sum(fold['valid_y'] != predicted) / len(fold['valid_y']))
    return np.mean(fold_errors), np.std(fold_errors)
#plot mean classification error rate in breast cancer data set for k-nearest neighbor(KNN)
# k takes the values 1, 7, 13, ..., 121 (21 values, step 6).
alter_k = np.zeros(21)
for i in range(21):
    alter_k[i] = 6*i+1
result_KNN = [error_rate_for_KNN(breastcancer_split_folds,int(i)) for i in alter_k ]
result_KNN = pd.DataFrame(result_KNN)
ero_KNN = result_KNN.iloc[:,0]
std_KNN = result_KNN.iloc[:,1]
plt.errorbar(alter_k ,ero_KNN,std_KNN,color= 'skyblue')
plt.title('breast cancer data set', fontsize=20)
plt.xlabel('k in KNN')
plt.ylabel('error rate')
plt.show()
# Then deal with the iris data set: load, append labels, shuffle and split.
iris_X,iris_y = load_iris(return_X_y=True)
iris = np.column_stack((iris_X,iris_y))
iris = shuffle(iris)
iris_split_folds = split_folds(iris)
def error_rate_for_logist_for_iris(data, c):
    """Mean and std of the 5-fold CV misclassification rate for multinomial
    logistic regression (lbfgs solver) with inverse regularization c."""
    fold_errors = []
    for fold in data[:5]:
        scaler = StandardScaler()
        scaler.fit(fold['train_x'])
        train_std = scaler.transform(fold['train_x'])
        valid_std = scaler.transform(fold['valid_x'])
        model = LogisticRegression(random_state=0, C=c, solver='lbfgs',
                                   multi_class='multinomial')
        model.fit(train_std, fold['train_y'])
        predicted = model.predict(valid_std)
        fold_errors.append(sum(fold['valid_y'] != predicted) / len(fold['valid_y']))
    return np.mean(fold_errors), np.std(fold_errors)
# Iris: logistic regression error vs log10(C).
result_iris = [error_rate_for_logist_for_iris(iris_split_folds,i) for i in alter_C]
result_iris = pd.DataFrame(result_iris)
ero_iris = result_iris.iloc[:,0]
std_iris = result_iris.iloc[:,1]
plt.errorbar(log_alter_C,ero_iris,std_iris,color='blue')
plt.title('iris data set', fontsize=20)
plt.xlabel('log(C) in logistic regression')
plt.ylabel('error rate')
plt.show()
# Iris: perceptron error vs log10(alpha).
result_pe_iris = [error_rate_for_perceptron(iris_split_folds,i) for i in alter_a]
result_pe_iris = pd.DataFrame(result_pe_iris)
ero_pe_iris = result_pe_iris.iloc[:,0]
std_pe_iris = result_pe_iris.iloc[:,1]
plt.errorbar(log_alter_a,ero_pe_iris,std_pe_iris,color='red')
plt.title('iris data set', fontsize=20)
plt.xlabel('log(alpha) in perceptron')
plt.ylabel('error rate')
plt.show()
# Iris: linear SVM error vs log10(C).
result_SVM = [error_rate_for_linear_SVM(iris_split_folds,i) for i in alter_C_SVM ]
result_SVM = pd.DataFrame(result_SVM)
ero_SVM = result_SVM.iloc[:,0]
std_SVM = result_SVM.iloc[:,1]
plt.errorbar(log_alter_C_SVM,ero_SVM,std_SVM,color='green')
plt.title('iris data set', fontsize=20)
plt.xlabel('log(C) in linear SVM')
plt.ylabel('error rate')
plt.show()
# Iris: KNN with k in {1, 7, 13, 19} (only 4 values -- iris has 150 samples).
alter_k = np.zeros(4)
for i in range(4):
    alter_k[i] = 6*i+1
result_KNN = [error_rate_for_KNN(iris_split_folds,int(i)) for i in alter_k ]
result_KNN = pd.DataFrame(result_KNN)
ero_KNN = result_KNN.iloc[:,0]
std_KNN = result_KNN.iloc[:,1]
plt.errorbar(alter_k ,ero_KNN,std_KNN,color= 'skyblue')
plt.title('iris data set', fontsize=20)
plt.xlabel('k in KNN')
plt.ylabel('error rate')
plt.show()
#Then deal with digits
digits_X,digits_y = load_digits(return_X_y=True)
digits= np.column_stack((digits_X,digits_y))
digits = shuffle(digits)
digits_split_folds = split_folds(digits)
def error_rate_for_logist_for_digits(data, c):
    """Mean and std of the 5-fold CV misclassification rate for multinomial
    logistic regression (saga solver, max_iter=100) with inverse
    regularization c."""
    fold_errors = []
    for fold in data[:5]:
        scaler = StandardScaler()
        scaler.fit(fold['train_x'])
        train_std = scaler.transform(fold['train_x'])
        valid_std = scaler.transform(fold['valid_x'])
        model = LogisticRegression(random_state=0, C=c, max_iter=100,
                                   solver='saga', multi_class='multinomial')
        model.fit(train_std, fold['train_y'])
        predicted = model.predict(valid_std)
        fold_errors.append(sum(fold['valid_y'] != predicted) / len(fold['valid_y']))
    return np.mean(fold_errors), np.std(fold_errors)
# Digits: logistic regression error vs log10(C).
result_digits = [error_rate_for_logist_for_digits(digits_split_folds,i) for i in alter_C]
result_digits = pd.DataFrame(result_digits)
ero_digits = result_digits.iloc[:,0]
std_digits = result_digits.iloc[:,1]
plt.errorbar(log_alter_C,ero_digits,std_digits,color='blue')
plt.title('digits data set', fontsize=20)
plt.xlabel('log(C) in logistic regression')
plt.ylabel('error rate')
plt.show()
# Digits: perceptron error vs log10(alpha).
result_pe_digits = [error_rate_for_perceptron(digits_split_folds,i) for i in alter_a]
result_pe_digits = pd.DataFrame(result_pe_digits)
ero_pe_digits = result_pe_digits.iloc[:,0]
std_pe_digits = result_pe_digits.iloc[:,1]
plt.errorbar(log_alter_a,ero_pe_digits,std_pe_digits,color='red')
plt.title('digits data set', fontsize=20)
plt.xlabel('log(alpha) in perceptron')
plt.ylabel('error rate')
plt.show()
# Digits: linear SVM error vs log10(C).
result_SVM = [error_rate_for_linear_SVM(digits_split_folds,i) for i in alter_C_SVM ]
result_SVM = pd.DataFrame(result_SVM)
ero_SVM = result_SVM.iloc[:,0]
std_SVM = result_SVM.iloc[:,1]
plt.errorbar(log_alter_C_SVM,ero_SVM,std_SVM,color='green')
plt.title('digits data set', fontsize=20)
plt.xlabel('log(C) in linear SVM')
plt.ylabel('error rate')
plt.show()
# Digits: KNN with k = 1, 7, ..., 121.
alter_k = np.zeros(21)
for i in range(21):
    alter_k[i] = 6*i+1
result_KNN = [error_rate_for_KNN(digits_split_folds,int(i)) for i in alter_k ]
result_KNN = pd.DataFrame(result_KNN)
ero_KNN = result_KNN.iloc[:,0]
std_KNN = result_KNN.iloc[:,1]
plt.errorbar(alter_k ,ero_KNN,std_KNN,color= 'skyblue')
plt.title('digits data set', fontsize=20)
plt.xlabel('K in KNN')
plt.ylabel('error rate')
plt.show()
# Then deal with wine: load, append labels, shuffle and split.
wine_X,wine_y = load_wine(return_X_y=True)
wine= np.column_stack((wine_X,wine_y))
wine = shuffle(wine)
wine_split_folds = split_folds(wine)
def error_rate_for_logist_for_wine(data, c):
    """Mean and std of the 5-fold CV misclassification rate for multinomial
    logistic regression (saga solver, max_iter=100) with inverse
    regularization c."""
    fold_errors = []
    for fold in data[:5]:
        scaler = StandardScaler()
        scaler.fit(fold['train_x'])
        train_std = scaler.transform(fold['train_x'])
        valid_std = scaler.transform(fold['valid_x'])
        model = LogisticRegression(random_state=0, C=c, max_iter=100,
                                   solver='saga', multi_class='multinomial')
        model.fit(train_std, fold['train_y'])
        predicted = model.predict(valid_std)
        fold_errors.append(sum(fold['valid_y'] != predicted) / len(fold['valid_y']))
    return np.mean(fold_errors), np.std(fold_errors)
# Wine: logistic regression error vs log10(C).
result_wine = [error_rate_for_logist_for_wine(wine_split_folds,i) for i in alter_C]
result_wine = pd.DataFrame(result_wine)
ero_wine = result_wine.iloc[:,0]
std_wine = result_wine.iloc[:,1]
plt.errorbar(log_alter_C,ero_wine,std_wine,color='blue')
plt.title('wine data set', fontsize=20)
plt.xlabel('log(C) in logistic regression')
plt.ylabel('error rate')
plt.show()
# Wine: perceptron error vs log10(alpha).
result_pe_wine = [error_rate_for_perceptron(wine_split_folds,i) for i in alter_a]
result_pe_wine = pd.DataFrame(result_pe_wine)
ero_pe_wine = result_pe_wine.iloc[:,0]
std_pe_wine = result_pe_wine.iloc[:,1]
plt.errorbar(log_alter_a,ero_pe_wine,std_pe_wine,color='red')
plt.title('wine data set', fontsize=20)
plt.xlabel('log(alpha) in perceptron')
plt.ylabel('error rate')
plt.show()
# Wine: linear SVM error vs log10(C).
result_SVM = [error_rate_for_linear_SVM(wine_split_folds,i) for i in alter_C_SVM ]
result_SVM = pd.DataFrame(result_SVM)
ero_SVM = result_SVM.iloc[:,0]
std_SVM = result_SVM.iloc[:,1]
plt.errorbar(log_alter_C_SVM,ero_SVM,std_SVM,color='green')
plt.title('wine data set', fontsize=20)
plt.xlabel('log(C) in linear SVM')
plt.ylabel('error rate')
plt.show()
# Wine: KNN (reuses the 21-value alter_k from the digits section).
result_KNN = [error_rate_for_KNN(wine_split_folds,int(i)) for i in alter_k ]
result_KNN = pd.DataFrame(result_KNN)
ero_KNN = result_KNN.iloc[:,0]
std_KNN = result_KNN.iloc[:,1]
plt.errorbar(alter_k ,ero_KNN,std_KNN,color= 'skyblue')
plt.title('wine data set', fontsize=20)
plt.xlabel('k in KNN')
plt.ylabel('error rate')
plt.show()
| true
|
78ca29c50b81cdd01f61882aeb53eb63d95c5da8
|
Python
|
pingansdaddy/newtempo
|
/src/growing_file.py
|
UTF-8
| 692
| 2.875
| 3
|
[] |
no_license
|
#coding:utf-8
import os, sys, time
class GrowingFile(object):
    """Tail a file that is still being written, echoing its bytes to stdout."""

    def __init__(self, fn):
        self._fn = fn
        self._fd = os.open(self._fn, os.O_RDONLY)
        self._max_size = 1024  # read size and flush threshold, in bytes

    def run(self):
        """Poll the file forever, writing each full buffer to stdout.

        Bug fixes vs. the original:
        * the buffer is bytes -- os.read() returns bytes on Python 3, so
          the old str buffer ('' + bytes) raised TypeError on first data;
        * reaching EOF now sleeps instead of busy-spinning at 100% CPU
          (the old `continue` skipped the sleep).
        """
        buf = b''
        while True:
            res = os.read(self._fd, self._max_size)
            if not res:
                time.sleep(0.01)  # at EOF for now -- wait for the file to grow
                continue
            buf += res
            if len(buf) < self._max_size:
                continue  # keep accumulating until a full buffer is ready
            sys.stdout.buffer.write(buf)
            sys.stdout.buffer.flush()
            buf = b''
            time.sleep(0.01)
if __name__ == '__main__':
    try:
        # Usage: growing_file.py <path-to-growing-file>
        fn = sys.argv[1]
        GrowingFile(fn).run()
    except KeyboardInterrupt:
        pass  # Ctrl-C exits quietly
| true
|
91007434f66aa36b973faa5caa466d39e0cd6c59
|
Python
|
AbhishekDoshi26/python-programs
|
/Panda/retrieve row values.py
|
UTF-8
| 107
| 2.53125
| 3
|
[] |
no_license
|
import pandas as pd
# Load the James Bond film dataset (one row per film).
# NOTE(review): 'Datasets\jamesbond.csv' is a Windows-specific path and
# relies on Python leaving the unknown '\j' escape intact -- prefer a raw
# string or forward slashes.
bond = pd.read_csv('Datasets\jamesbond.csv')
# .loc with a list of row labels selects multiple rows at once; with the
# default RangeIndex, label equals position, so this picks rows 0, 1, 25.
data = bond.loc[[0, 1, 25]]
print(data)
| true
|
a6123f6a7d429bd19aafd9b1f7966cf4102a50c1
|
Python
|
Steven-Eardley/lcd_screen
|
/uptimePlz.py
|
UTF-8
| 348
| 2.84375
| 3
|
[] |
no_license
|
from ScreenController import ScreenController
import sys
def main():
    """Read `uptime`-style lines from stdin and echo them to the LCD and stdout.

    Each line is assumed to look like "10:15:32 up 5 days, ...": the first
    token drops its last 3 characters (seconds + colon), the second token is
    kept whole, and the third drops its trailing comma -- TODO confirm
    against the actual producer piping into this script.
    """
    screen = ScreenController()
    for line in sys.stdin:
        uptime = line.split()
        # Build the display string once instead of duplicating the
        # expression for the LCD and the console.
        text = uptime[0][:-3] + " " + uptime[1] + " " + uptime[2][:-1]
        screen.println1(text)
        print(text)
if __name__ == '__main__':
    main()
| true
|
91802dd9054646385ce1a5c8ca02af19c86fdcb3
|
Python
|
pybites/pyplanet-django
|
/articles/test.py
|
UTF-8
| 176
| 2.65625
| 3
|
[] |
no_license
|
from urllib.parse import urlencode, quote_plus

# Demo: URL-encode a form-style mapping, encoding spaces as '+'
# (quote_plus) rather than '%20'.
payload = {
    'username': 'administrator',
    'password': 'xyz das dasdd',
}
result = urlencode(payload, quote_via=quote_plus)
print(result)
| true
|
8652a03b519e4271f547a3c7d7de5e4690f0e051
|
Python
|
git-wsf/crawler_project
|
/haodaifu/haodaifu/utils/deal_excel.py
|
UTF-8
| 1,381
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @time : 18-6-13 下午2:24
# @author : Feng_Hui
# @email : capricorn1203@126.com
import pandas as pd
import os
class CsvToDict(object):
    """Read a slice of a scraped-doctors CSV so it can be converted to dicts."""
    # Directory containing this module (currently unused by the methods below).
    now_path = os.path.dirname(__file__)
    def __init__(self, file_name):
        super(CsvToDict, self).__init__()
        # Bare file name; resolved under the fixed data directory in read_file.
        self.file_name = file_name
    def read_file(self, size=None, use_cols=None):
        """
        :param size:chunk size
        :param use_cols:columns needed
        :return:chunk data
        """
        # NOTE(review): hard-coded absolute path to one machine's data dir;
        # consider making this configurable (now_path is available above).
        file_path = os.path.join('/home/cyzs/wksp/my_env/temp_file', self.file_name)
        # file_path = os.path.join('/home/fengh/wksp/crawler_project/haodaifu/haodaifu/my_data', self.file_name)
        if not os.path.exists(file_path):
            raise FileNotFoundError
        data = pd.read_csv(file_path,
                           iterator=True,
                           usecols=use_cols
                           )
        # data2 = pd.read_csv(file_path, index_col='doctor_id')
        # print(data2.head())
        # print(data2.info(memory_usage='deep'))
        chunk = data.get_chunk(size=size)
        # print(len(chunk))
        # NOTE(review): returns only the single hard-coded row 2794 of the
        # chunk -- presumably for resuming a crawl at a fixed position; verify.
        return chunk[2794:2795]
if __name__ == "__main__":
    # Demo: read only the doctor_url column and dump the slice as records.
    excel_to_dict = CsvToDict('haodf_0703.csv')
    my_data = excel_to_dict.read_file(use_cols=['doctor_url'])
    my_dict = my_data.to_dict(orient='records')
    print(my_dict)
| true
|
9f077ff1b0995636201eea8a6238cc0cf4adc6e2
|
Python
|
rkurti/NetSci-RediYuchenSun
|
/src/League.py
|
UTF-8
| 1,543
| 3
| 3
|
[] |
no_license
|
class League:
    """A football league: its clubs and its transfers grouped by position/year.

    Leagues are identified solely by name (see __eq__/__hash__).
    """
    def __init__(self, league_name):
        self.league_name = league_name
        self.transfers_for_year = {}  # year -> list of transfer links
        self.clubs = set()
        self.all_transfers = set()
        self.front_transfers = set()  # all front transfers
        self.midfield_transfers = set()  # all midfield transfers
        self.back_transfers = set()  # Defense transfers
        self.goalkeeper_transfers = set()  # Goalkeeper transfers
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.league_name == other.league_name
        return False
    def __ne__(self, other):
        # BUG FIX: this used to return the SAME result as __eq__, so
        # `a != b` was True only when the names were equal. __ne__ must be
        # the negation of __eq__.
        return not self.__eq__(other)
    def __hash__(self):
        # Consistent with __eq__: equal names give equal hashes.
        return hash(self.league_name)
    def show_transfers_for(self, start_year, end_year):
        """Print every transfer link for each year in [start_year, end_year]."""
        for year in range(start_year, end_year + 1):
            print("=======showing the links for " + str(year))
            try:
                for link in self.transfers_for_year[year]:
                    link.get_transfer_link_info()
            except Exception as e:
                # A year with no recorded transfers raises KeyError;
                # report it and keep going.
                print(e)
    def show_all_teams_belonging_to_league(self):
        """Print the id and name of every club registered in this league."""
        print("=====================showing " + str(
            len(self.clubs)) + " teams in" + self.league_name + "==================")
        for club in self.clubs:
            print(str(club.club_id) + "," + club.club_name)
        print("-----------done showing teams in " + self.league_name + "------------------")
| true
|
62aa95ef0a6fcb9ba4e5f1d84681a23ff8f630f7
|
Python
|
sk187/IntermediatePython
|
/excercises.py
|
UTF-8
| 2,803
| 4.53125
| 5
|
[] |
no_license
|
# Exercise Code
# Write a method called e() that
# 1. Determines what data type the input is
#
# 2. It returns the input and datatype in a string
# only for strings.
# " INPUT is a <type DATATYPE>"
#
# e('hi')
# => "hi is a <type 'str'>"
#
# If the input is a int or float return the following
# e(5)
# => 'Input cannot be an int'
#
# e(5.0)
# => 'Input cannot be a float'
# Starter Code
def e(input):
    """Return "<value> is a <type ...>" for any input except ints and floats.

    Completes the exercise starter code (the original `if .... :`
    placeholders were a SyntaxError).

    Examples (CPython 3 type repr):
        e('hi')  -> "hi is a <class 'str'>"
        e(5)     -> 'Input cannot be an int'
        e(5.0)   -> 'Input cannot be a float'
    """
    datatype = type(input)
    # `is` comparisons on the exact type: bool is a subclass of int, but
    # type(True) is bool, so booleans fall through to the generic branch
    # (the exercise only restricts int and float).
    if datatype is int:
        return "Input cannot be an int"
    elif datatype is float:
        return "Input cannot be a float"
    else:
        return "%s is a %s" %(input, datatype)
################################################################################
# s1 = 'What is the air-speed velocity'
# s2 = 'of an unladen swallow?'
#
# 1. Combine s1 and s2 into a new varible called s
#
# 2. Replace "unladen" with "unladen african"
#
# 3. Capitalize "african" by slicing it from s
#
# 4. Count how many spaces there are in s
#
# 5. Get the index of swallow in s
#
# 6. Print a statement with the correct counts so that
# "There are __ spaces and sallow is at the __ index"
#
# With either varible string injection method we learned
#
# Bonus
# 7. Using string slicing, replace, capitalize african in s
################################################################################
# 1. Create a list of the first names
# of your family members.
#
# 2. Print the name of the last person in the list.
#
# 3. Print the length of the name of the first
# person in the list.
#
# 4. Change one of the names from their real name
# to their nickname.
#
# 5. Append a new person to the list.
#
# 6. Change the name of the new person to lowercase
# using the string method 'lower'.
#
# 7. Sort the list in reverse alphabetical order.
#
# 8. Bonus: Sort the list by the length of the names
# (shortest to longest).
################################################################################
# EXERCISE 1:
# Given that: letters = ['a', 'b', 'c']
# Write a list comprehension that returns: ['A', 'B', 'C']
#
# EXERCISE 2 (BONUS):
# Given that: word = 'abc'
# Write a list comprehension that returns: ['A', 'B', 'C']
#
# EXERCISE 3 (BONUS):
# Given that: fruits = ['Apple', 'Banana', 'Cherry']
# Write a list comprehension that returns: ['A', 'B', 'C']
################################################################################
# family = {'dad':'Homer', 'mom':'Marge', 'size':2,
# 'kids': ['bart', 'lisa']}
#
# 1. Print the name of the mom.
# 2. Change the size to 5.
# 3. Add 'Maggie' to the list of kids.
# 4. Fix 'bart' and 'lisa' so that
# the first letter is capitalized.
#
# Bonus: Do this last step using a list comprehension.
################################################################################
| true
|
1eb63fd4709078d5d0519a2a39871a57c4e0dcd4
|
Python
|
Axonify/muffin.io
|
/skeletons/gae/apps/decorators.py
|
UTF-8
| 1,445
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
from google.appengine.api import memcache
import json
from apps import DEBUG
#
# Decorators
#
def memcached(age):
    """
    Decorator factory: cache the handler's JSON output in App Engine
    memcache for *age* seconds, keyed by the request URL.

    Note that a decorator with arguments must return the real decorator that,
    in turn, decorates the function. For example:
    @decorate("extra")
    def function(a, b):
        ...
    is functionally equivalent to:
    function = decorate("extra")(function)
    """
    def inner_memcached(func):
        """ A decorator that implements the memcache pattern """
        def new_func(requestHandler, *args, **kwargs):
            # Cache lookup keyed by the full request URL.
            result = memcache.get(requestHandler.request.url)
            # Recompute on a cache miss; age == 0 or DEBUG disables caching.
            if result is None or age == 0 or DEBUG:
                # Use compact JSON encoding
                result = json.dumps(func(requestHandler, *args, **kwargs), separators=(',',':'))
                memcache.set(requestHandler.request.url, result, age)
            requestHandler.response.headers["Content-Type"] = "application/json"
            requestHandler.response.out.write(result)
        return new_func
    return inner_memcached
def as_json(func):
    """Decorator: serialize the handler's return value as compact JSON.

    The wrapped handler's result is dumped with minimal separators, the
    response Content-Type is set to application/json, and the payload is
    written to the response body. The wrapper itself returns None.
    """
    def wrapper(requestHandler, *args, **kwargs):
        payload = func(requestHandler, *args, **kwargs)
        body = json.dumps(payload, separators=(',', ':'))
        response = requestHandler.response
        response.headers["Content-Type"] = "application/json"
        response.out.write(body)
    return wrapper
| true
|
cdc1220a59bc68f04f8f3e4394e53cc555ee1742
|
Python
|
vonum/style-transfer
|
/color_transfer.py
|
UTF-8
| 1,371
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
import cv2
import numpy as np
from PIL import Image
class ColorTransfer:
    """Transfer the color of one image onto the content of another.

    content_img -- image providing the desired content/structure
    color_img   -- image providing the desired color
    Both are handled with cv2 BGR conversions, so BGR arrays (as produced
    by cv2.imread) are expected.
    """
    # content_img - image containing desired content
    # color_img - image containing desired color
    def __init__(self, content_img, color_img):
        self.content_img = content_img
        self.color_img = color_img

    def luminance_transfer(self, convert_type):
        """Combine content_img's luminance channel with color_img's two
        chroma channels in the requested color space.

        convert_type -- one of "yuv", "ycrcb", "luv", "lab"
        Raises ValueError for any other value (BUG FIX: previously an
        unknown value fell through and crashed later with
        UnboundLocalError on cvt_type).
        Returns the recombined BGR image as float32.
        """
        content_img = self.content_img
        color_img = self.color_img
        if convert_type == "yuv":
            cvt_type = cv2.COLOR_BGR2YUV
            inv_cvt_type = cv2.COLOR_YUV2BGR
        elif convert_type == "ycrcb":
            cvt_type = cv2.COLOR_BGR2YCR_CB
            inv_cvt_type = cv2.COLOR_YCR_CB2BGR
        elif convert_type == "luv":
            cvt_type = cv2.COLOR_BGR2LUV
            inv_cvt_type = cv2.COLOR_LUV2BGR
        elif convert_type == "lab":
            cvt_type = cv2.COLOR_BGR2LAB
            inv_cvt_type = cv2.COLOR_LAB2BGR
        else:
            raise ValueError(
                "convert_type must be one of 'yuv', 'ycrcb', 'luv', 'lab'; "
                "got %r" % (convert_type,))
        content_cvt = self._convert(content_img, cvt_type)
        color_cvt = self._convert(color_img, cvt_type)
        # First (luminance-like) channel from the content image ...
        c1, _, _ = self._split_channels(content_cvt)
        # ... and the two chroma channels from the color image.
        _, c2, c3 = self._split_channels(color_cvt)
        img = self._merge_channels([c1, c2, c3])
        img = self._convert(img, inv_cvt_type).astype(np.float32)
        return img

    def _split_channels(self, image):
        return cv2.split(image)

    def _merge_channels(self, channels):
        return cv2.merge(channels)

    def _convert(self, img, cvt_type):
        return cv2.cvtColor(img, cvt_type)
| true
|
bd2cea490068f55b3ceac7da893c8af8cefc628e
|
Python
|
marvinboe/DownstreamReplAge
|
/plothelpers.py
|
UTF-8
| 5,120
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#######################################################################
#filename: 'plothelpers.py'
#Library with useful functions for plotting.
#
#Copyright 2018 Marvin A. Böttcher
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
########################################################################
import matplotlib
import matplotlib.pyplot
import itertools
import numpy as np
import math
def latexify(fig=None,fig_width=None, fig_height=None, columns=1):
""" Sets standard parameters of matplotlib. Call before plotting.
adapted from https://nipunbatra.github.io/blog/2014/latexify.html
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1,2])
if fig_width is None:
fig_width = 2.825 if columns==1 else 5.788 # width in inches
# fig_width = 3.38 if columns==1 else 7. # width in inches
# fig_width = 3.176 if columns==1 else 6.491 # width in inches
# fig_width = 3.39 if columns==1 else 6.9 # width in inches
# 1 inch= 2.54 cm
if fig_height is None:
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {#'backend': 'ps',
'axes.labelsize': 9, # fontsize for x and y labels (was 10)
'axes.titlesize': 9,
'font.size': 10, # was 10
'legend.fontsize': 8, # was 10
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,
'figure.figsize': [fig_width,fig_height],
'font.family': 'sans-serif',
'font.sans-serif': ['Helvetica'],#['computer modern roman'], #avoid bold axis label
'text.latex.preamble': [r'\usepackage{helvet}',# set the normal font here
r'\usepackage[EULERGREEK]{sansmath}', # load up the sansmath so that math -> helvet
r'\sansmath' # <- tricky! -- gotta actually tell tex to use!
]
}
if fig:
print("texify figure dimensions set: ",fig_width,fig_height)
fig.set_size_inches((fig_width,fig_height),forward=True)
matplotlib.rcParams.update(params)
return params
def set_axes_size(width=None,height=None, ax=None):
""" width, height in inches """
if width is None: width=2.625
if height is None:
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
height = width*golden_mean # height in inches
if ax is None: ax=plt.gca()
l = ax.figure.subplotpars.left
r = ax.figure.subplotpars.right
t = ax.figure.subplotpars.top
b = ax.figure.subplotpars.bottom
prevfigsize=ax.figure.get_size_inches()
prevfigw=prevfigsize[0]
prevfigh=prevfigsize[1]
figw=float(width)+(l+1-r)*prevfigw
figh=float(height)+(b+1-t)*prevfigh
newl=l*prevfigw/figw
newr=1-(1-r)*prevfigw/figw
newb=b*prevfigh/figh
newt=1-(1-t)*prevfigh/figh
ax.figure.set_size_inches(figw, figh,forward=True)
ax.figure.subplots_adjust(left=newl,right=newr,top=newt,bottom=newb)
def create_colorcyle(number,cmap=None,cmapname="viridis"):
if not cmap:
cmap = matplotlib.pyplot.get_cmap(cmapname)
indices = np.linspace(0, cmap.N, number)
my_colors = itertools.cycle([cmap(int(i)) for i in indices])
return my_colors
def plot_datacap(ax,x,y,xint=None,yint=None,color="black",lw=0.8,offset=None):
'''plots two short diagonal lines to denote capping of
data yaxis.
x,y: (center) position
xint,yint: interval taken up by lines
'''
if xint is None:
xint=1
if yint is None:
yint=1
xint=xint/2.
yint=yint/2.
if offset is None:
offset=yint
steps=20
xvals=np.linspace(x-xint,x+xint,steps)
yvals=np.linspace(y+yint,y-yint,steps)
ax.plot(xvals,yvals,color=color,lw=lw,zorder=5)
ax.plot(xvals,yvals+offset,color=color,lw=lw,zorder=5)
vertx=[xvals[0],xvals[0],xvals[-1],xvals[-1]]
verty=[yvals[0],yvals[0]+offset,yvals[-1]+offset,yvals[-1]]
xy=np.vstack([vertx,verty]).T
# print(xy)
patch=matplotlib.patches.Polygon(xy,facecolor='white',zorder=4)
ax.add_patch(patch)
| true
|
06052a9fc324c525d68ccf9953350acd19472552
|
Python
|
seva1232/bot
|
/StopGame.py
|
UTF-8
| 1,463
| 2.921875
| 3
|
[] |
no_license
|
import requests
import pprint
import re
from urllib.parse import quote_plus
import asyncio
import aiohttp
class StopError(Exception):
    """Raised when stopgame.ru replies with a non-200 HTTP status.

    The failing status code is kept on ``self.code``.
    """
    def __init__(self, code):
        self.code = code
def formater_of_sg(dictionary, key):
    """Format the StopGame score for *key* as ", <score> <i>SGame</i>".

    Returns an empty string when the key is absent from *dictionary*.
    """
    if key not in dictionary:
        return ''
    return ", " + str(dictionary[key]) + ' <i>SGame</i>'
def stop_game_request_parse(req):
    """Extract [title, score] pairs from a stopgame.ru search-results page.

    Each score appears as a 3-character string (e.g. "9.0") inside a
    <span class="tag">; each title is the alt text of the game's cover
    image. Scores are rescaled to an int out of 100 (x10).
    """
    raw_scores = re.findall(r'(?<=<span class="tag">)...(?=</span></div></div>)', req)
    titles = re.findall(r'(?<=" alt=")[-:\w, ]+(?="></a><div)', req)
    return [[title, int(float(score) * 10)]
            for title, score in zip(titles, raw_scores)]
async def stop_game(question):
    """Search stopgame.ru for *question*; return parsed [[title, score], ...].

    Raises StopError(status) for any non-200 response.
    """
    url = "https://stopgame.ru/search/?s={}&where=games&sort=relevance".format(quote_plus(question))
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            # NOTE(review): the body is awaited before the status check, so
            # even an error response downloads its payload first.
            site_text = await resp.text()
            if resp.status != 200:
                raise StopError(resp.status)
            answer = stop_game_request_parse(site_text)
            return answer
if __name__ == "__main__":
    # Manual smoke test: search for a title typed on stdin and print the
    # parsed ratings.
    title = input()
    url = "https://stopgame.ru/search/?s={}&where=games&sort=relevance".format(quote_plus(title))
    req = requests.get(url)
    print(req)
    # BUG FIX: the parser expects the page HTML, not the Response object
    # (re.findall on a Response raised TypeError).
    ratings = stop_game_request_parse(req.text)
    pprint.pprint(ratings)
| true
|
983f43121a99fc2dbf32d68ec65c4307f5513ef2
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03471/s287821378.py
|
UTF-8
| 323
| 2.8125
| 3
|
[] |
no_license
|
# Appears to be AtCoder "Otoshidama" (p03471): find counts (n, m, l) of
# 10000/5000/1000-yen bills with n + m + l == N summing exactly to Y,
# or print "-1 -1 -1" when impossible.
N, Y =map(int, input().split())
c = 0  # flag: set to 1 once a solution has been printed
for n in range(N+1):
    if c == 1:
        break
    for m in range(N-n+1):
        # The remaining bills are forced, so only two loops are needed.
        l = N -n - m
        if Y ==( n*10000 + m *5000 + l *1000) and (n + m + l) == N:
            print(n , m , l)
            c = 1
            break
if c != 1:
    print(-1 , -1 , -1)
| true
|
ad25c202478c205d86d2dd807547e16fc9d1e3ad
|
Python
|
ThiruSundar/Python-Tasks
|
/picdiff.py
|
UTF-8
| 208
| 2.59375
| 3
|
[] |
no_license
|
from PIL import Image, ImageChops
# Compare two images pixel-wise and display the difference if they differ.
img1 = Image.open('pic1.jpg')
img2 = Image.open('pic2.jpg')
diff = ImageChops.difference(img1 , img2)
# print(diff.getbbox())
# getbbox() returns None when the difference image is entirely black
# (i.e. the images are identical), so the viewer only opens on a real diff.
if diff.getbbox():
    diff.show()
| true
|
f2b73a84db08cb59b790e2ce15c3044a37811faf
|
Python
|
cassianasb/python_studies
|
/fiap-on/8-5 - CaptureTemperatureJson.py
|
UTF-8
| 720
| 3.328125
| 3
|
[] |
no_license
|
import serial
import json
import time
from datetime import datetime

# Probe COM0..COM9 and keep the first serial port that opens successfully.
connection = ""
for port in range(10):
    try:
        connection = serial.Serial("COM"+str(port), 115200)
        print("Conectado na porta: ", connection.portstr)
        break
    except serial.SerialException:
        pass
if connection != "":
    # Collect 10 readings, keyed by the capture timestamp.
    dictionary = {}
    cont = 0
    while cont < 10:
        answer = connection.readline()
        # The first 3 characters of the line are assumed to hold the
        # temperature reading -- TODO confirm against the device protocol.
        reading = answer.decode('utf-8')[0:3]
        dictionary[str(datetime.now())] = [reading]
        print(reading)
        cont+=1
    with open('Temperature.json', "w") as file:
        # BUG FIX: the dict was created as `dicionary` (typo) but dumped as
        # `dictionary`, which raised NameError; one consistent name now.
        json.dump(dictionary, file)
    connection.close()
    print("Conexão encerrada")
else:
    print("Sem portas disponíveis")
| true
|
fcc363802675bdd5ea0e46ae8b5d9c1c2d14bff6
|
Python
|
simonedeponti/CorsoPython-WPFExample
|
/ExampleWpfApp/ExampleWpfApp.py
|
UTF-8
| 449
| 2.8125
| 3
|
[] |
no_license
|
import wpf
from System.Windows import Application, Window
class MyWindow(Window):
    """Main window (IronPython/WPF); layout is loaded from ExampleWpfApp.xaml."""
    def __init__(self):
        wpf.LoadComponent(self, 'ExampleWpfApp.xaml')
        # Wire the XAML button's Click event to the handler below.
        self.greetButton.Click += self.greet
    def greet(self, sender, event):
        """Click handler: show a greeting for the name typed into nameTextBox."""
        name = self.nameTextBox.Text
        greeting = "Hello {name}".format(name=name)
        self.outputTextBlock.Text = greeting
if __name__ == '__main__':
    Application().Run(MyWindow())
| true
|
eed9e3c5784097a60c2a0d6c942303bb1808cfa8
|
Python
|
nathanesau/data_structures_and_algorithms
|
/_courses/cmpt225/practice4-solution/question14.py
|
UTF-8
| 407
| 3.546875
| 4
|
[] |
no_license
|
"""
write an algorithm that gets two binary trees
and checks if they have the same inOrder traversal.
"""
from binary_tree import in_order, build_tree7
def are_equal_in_order(tree1, tree2):
    """Return True iff both binary trees produce the same in-order traversal."""
    return in_order(tree1) == in_order(tree2)
if __name__ == "__main__":
    # test tree7
    # Sanity check: a tree trivially shares its own in-order traversal,
    # so this prints True.
    tree7 = build_tree7()
    print(are_equal_in_order(tree7, tree7))
| true
|
50fd20e964720e7c5c049cdccc5ce32ecc4512a8
|
Python
|
greenrazer/deep-vis
|
/base/trianglecollection.py
|
UTF-8
| 1,037
| 3.75
| 4
|
[] |
no_license
|
class TriangleCollection:
    """Thin wrapper over a list of triangles that broadcasts arithmetic.

    `coll + x`, `coll * x`, `coll / x` return a new collection built from
    copies; the augmented forms (`+=`, `*=`, `/=`) mutate the wrapped
    triangles in place.
    """
    def __init__(self, triangles):
        self._triangles = triangles

    def __iter__(self):
        return iter(self._triangles)

    def __add__(self, other):
        temp = self.copy()
        temp += other
        return temp

    def __iadd__(self, other):
        for i in range(len(self._triangles)):
            self._triangles[i] += other
        return self

    def __mul__(self, other):
        temp = self.copy()
        temp *= other
        return temp

    def __imul__(self, other):
        for i in range(len(self._triangles)):
            self._triangles[i] *= other
        return self

    def __truediv__(self, other):
        temp = self.copy()
        temp /= other
        return temp

    def __itruediv__(self, other):
        # BUG FIX: this hook was named __idiv__ (the Python 2 name), so on
        # Python 3 `temp /= other` inside __truediv__ fell back to
        # __truediv__ itself and recursed until RecursionError.
        for i in range(len(self._triangles)):
            self._triangles[i] /= other
        return self

    # Keep the old Python 2 name as an alias for backward compatibility.
    __idiv__ = __itruediv__

    def copy(self):
        """Return a new collection holding each triangle's own copy()."""
        output = []
        for tri in self._triangles:
            output.append(tri.copy())
        return TriangleCollection(output)
| true
|
5427e381f30c5d8216d54c8a7aa7d5b786075d52
|
Python
|
mingsalt/START_UP_PYTHON
|
/6st/hw1.py
|
UTF-8
| 882
| 3.71875
| 4
|
[] |
no_license
|
# hw1: card game -- hold cards that the machine does NOT have.
# Jay and Emily each pick 5 cards (from 1-9); the machine randomly picks 3.
# Whoever keeps more cards outside the machine's hand wins.
jay=input("Jay가 선택한 카드(1~9에서 5장):").split()
jay2=list(map(int,jay))
emily=input("Emily가 선택한 카드(1~9에서 5장):").split()
emily2=list(map(int,emily))
from array import array  # NOTE: unused here; kept from the original
import random
com=random.sample(range(1,10),3)
com1=com[0]
com2=com[1]
com3=com[2]
print(f"기계가 선택한 카드(1~9에서 3장) : {com1} {com2} {com3} ")
# BUG FIX: the original condition `len(jay2)==5 & len(emily)==5` mixed a
# bitwise `&` into a chained comparison (precedence: & binds tighter than
# ==) and measured the raw token list `emily` instead of the parsed
# `emily2`; use a plain boolean `and` over the parsed lists.
if len(jay2) == 5 and len(emily2) == 5:
    a=set(jay2)
    b=set(emily2)
    c=set(com)
    d=a-c  # Jay's cards the machine does not hold
    e=b-c  # Emily's cards the machine does not hold
    num_j=len(d)
    num_e=len(e)
    if num_j>num_e :
        print(f"Emily대 Jay는 {num_j}:{num_e}로 Jay 승 !")
    elif num_j<num_e :
        print(f"Emily대 Jay는 {num_j}:{num_e}로 Emily 승 !")
    else :
        print("무승부입니다!")
else :
    print("카드 5장을 다시 선택하세요.")
| true
|
6f8eed9c506b76d0f9bf3a120355eff27f3b8be8
|
Python
|
chika-ibegbu/wine_quality
|
/wine project.py
|
UTF-8
| 3,769
| 3.1875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 21:41:26 2021

@author: Dell

Exploratory analysis of the red-wine quality dataset.
"""
#import the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# BUG FIX: '%matplotlib inline' is IPython magic and a SyntaxError in a
# plain .py file; kept as a comment for notebook use.
# %matplotlib inline
#import the dataset
df=pd.read_csv(r"C:\Users\Dell\Downloads\winequality-red.csv")
#use the dataframe as wine
wine=df
wine
# Data profiling. NOTE: in a plain script these bare expressions are
# evaluated and discarded; wrap them in print() to actually see output.
wine.head()
wine.tail()
type(wine)
len(wine)
wine.shape
wine.ndim
wine.describe()  # BUG FIX: was `wine.describe` (a bound method, never called)
wine.isnull().sum()
wine.duplicated()
wine.info()
#Check the amount of duplicate values
wine.drop_duplicates()
wine2=wine.drop_duplicates()
#Get the Unique values
wine2["pH"].unique()
len(wine2["pH"].unique())
wine3=wine2["quality"].unique()
#find the correlation of indicators
correlation=wine2.corr()
# BUG FIX: was `correlation()` -- calling a DataFrame raises TypeError.
print(correlation)
#Plot heatmap (upper triangle masked to avoid the mirrored duplicate)
plt.figure(figsize=(20, 17))
matrix = np.triu(wine2.corr())
sns.heatmap(wine2.corr(), annot=True,
            linewidth=.8, mask=matrix, cmap="rocket")
#Classify the quality of wine according to its alcohol content
wine2.groupby("quality")["alcohol"].mean().plot(kind='bar')
plt.show()
# Box-style cat plots: one per physico-chemical feature vs quality.
#cat plot
sns.catplot(x="quality", y="fixed acidity", data=wine2, kind="box")
sns.catplot(x="quality", y="volatile acidity", data=wine2, kind="box")
sns.catplot(x="quality", y="citric acid", data=wine2, kind="box")
sns.catplot(x="quality", y="residual sugar", data=wine2, kind="box")
sns.catplot(x="quality", y="chlorides", data=wine2, kind="box")
sns.catplot(x="quality", y="density", data=wine2, kind="box")
sns.catplot(x="quality", y="pH", data=wine2, kind="box")
sns.catplot(x="quality", y="sulphates", data=wine2, kind="box")
sns.catplot(x="quality", y="alcohol", data=wine2, kind="box")
# Frequency of each distinct fixed-acidity value.
acidity_count = wine2["fixed acidity"].value_counts().reset_index()
acidity_count
plt.figure(figsize=(30, 10))
plt.style.use("ggplot")
sns.barplot(x=acidity_count["index"], y=acidity_count["fixed acidity"])
plt.title("TYPE OF ACIDITY WITH QUALITY", fontsize=20)
plt.xlabel("ACIDITY", fontsize=20)
plt.ylabel("COUNT", fontsize=20)
plt.show()
#DISTRIBUTION LIST
plt.style.use("ggplot")
# NOTE(review): sns.distplot is deprecated in modern seaborn in favour of
# displot/histplot (as the trailing comment hints).
sns.distplot(wine2["pH"]); # using displot here
plt.title("DISTRIBUTION OF pH FOR DIFFERENT QUALITIES", fontsize=18)
plt.xlabel("pH", fontsize=20)
plt.ylabel("COUNT", fontsize=20)
plt.show()
#VIOLINPLOT---------------
sns.violinplot(x="quality", y="fixed acidity", data=wine2)
sns.violinplot(x="quality", y="pH", data=wine2)
sns.violinplot(x="quality", y="density", data=wine2)
sns.violinplot(x="quality", y="residual sugar", data=wine2)
sns.violinplot(x="quality", y="alcohol", data=wine2)
sns.violinplot(x="quality", y="chlorides", data=wine2)
#histogram---------------------------------------
def draw_histograms(wine2, variables, n_rows, n_cols):
    """Draw one histogram per column in *variables* on an n_rows x n_cols grid.

    wine2     -- DataFrame holding the data to plot
    variables -- iterable of column names
    """
    fig=plt.figure(figsize=(12,10))
    for i, var_name in enumerate(variables):
        ax=fig.add_subplot(n_rows,n_cols,i+1)
        # BUG FIX: the body read the global `df` (the raw frame with
        # duplicates) instead of the `wine2` argument, so the function
        # ignored the data it was given.
        plt.hist(wine2[var_name],edgecolor='black')
        ax.set_title(var_name.upper())
    fig.tight_layout()
    plt.show()
# Histogram grid for every column of the de-duplicated frame.
draw_histograms(wine2, wine2.columns, 4, 3)
#BOXPLOT-------------------------
# One boxplot per feature on a 4x3 grid, titles upper-cased,
# axis labels suppressed.
plt.figure(figsize=(15,10))
for i,var_name in enumerate(list(wine2.columns)):
    plt.subplot(4,3,i+1)
    sns.boxplot(x=var_name, data=wine2)
    plt.title(var_name.upper())
    plt.xlabel(None)
    plt.ylabel(None)
plt.tight_layout()
plt.show()
#stripplots--------
sns.stripplot(x="quality", y="fixed acidity", data=wine2)
sns.stripplot(x="quality", y="pH", data=wine2)
sns.stripplot(x="quality", y="density", data=wine2)
sns.stripplot(x="quality", y="residual sugar", data=wine2)
sns.stripplot(x="quality", y="alcohol", data=wine2)
sns.stripplot(x="quality", y="chlorides", data=wine2)
|
8b15cf8a455e7199288f699baed76ac94719f1a8
|
Python
|
jiandie012/python
|
/.idea/Homework/9x9.py
|
UTF-8
| 136
| 2.875
| 3
|
[] |
no_license
|
# 9x9 multiplication table, one row per multiplier i (right-triangle layout):
# row i holds "j x i = i*j" for j = 1..i, with 2-character field widths.
rows = []
for i in range(1, 10):
    cells = ["%2s x%2s = %2s" % (j, i, i * j) for j in range(1, i + 1)]
    rows.append(' '.join(cells))
print('\n'.join(rows))
#print ([i for i in range(10)])
| true
|
21c0519f4186b2c8015d6f285d3501c39816bd17
|
Python
|
Spidey03/covid_19_dashboard
|
/covid_dashboard/interactors/storages/.~c9_invoke_wj8lk6.py
|
UTF-8
| 927
| 2.515625
| 3
|
[] |
no_license
|
from abc import ABC
from abc import abstractmethod
from covid_dashboard.interactors.storages.dtos\
import (DailyStateDataDto, CumulativeStateDataDto,
DailyDistrictDataDto, CumulativeDistrictDataDto)
class CovidStorageInterface(ABC):
    """Storage port for the covid dashboard interactors.

    Concrete adapters must validate ids and serve daily/cumulative covid
    figures at state and district granularity, returned as the DTOs
    imported above.
    """
    @abstractmethod
    def is_state_id_valid(self, state_id: int):
        """Return whether a state with *state_id* exists."""
        pass
    @abstractmethod
    def is_district_id_valid(self, district_id: int):
        """Return whether a district with *district_id* exists."""
        pass
    @abstractmethod
    def get_state_wise_daily_data(self, state_id: int) -> DailyStateDataDto:
        """Return per-day figures for one state."""
        pass
    @abstractmethod
    def get_state_wise_cumulative_data(
            self, state_id: int) -> CumulativeStateDataDto:
        """Return running totals for one state."""
        pass
    @abstractmethod
    def get_district_wise_daily_data(self, district_id) -> DailyDistrictDataDto:
        """Return per-day figures for one district."""
        pass
    @abstractmethod
    def get_district_wise_cumulative_data(self,
                                          district_id) -> CumulativeDistrictDataDto:
        """Return running totals for one district."""
        pass
| true
|
f5c0b13b7aad7c787c5f95ef4a78ccf3a96e5d6b
|
Python
|
c-moon-2/Universal_Specification_Verification_Program
|
/pylib/lan_search.py
|
UTF-8
| 394
| 2.859375
| 3
|
[] |
no_license
|
import psutil
def lan_info():
    """Print the name and an IP address of every network interface."""
    # LAN
    print ("--------- LAN INFO ------------------------------------------------------------------")
    lanInfo=psutil.net_if_addrs()
    for card_name in lanInfo:
        print("LAN 이름 : ", card_name)
        # NOTE(review): assumes each interface has at least two address
        # entries and that index 1 is the IPv4 one -- the order is
        # OS-dependent; consider selecting by family == socket.AF_INET.
        print(" - IP 주소 : ", lanInfo[card_name][1].address)
        print()
    print()
| true
|
b5acbc78d32226149fc59994092977a01a5abb3a
|
Python
|
peterts/adventofcode2020
|
/adventofcode2020/day4.py
|
UTF-8
| 2,241
| 2.6875
| 3
|
[] |
no_license
|
from functools import partial
from typing import Literal
from more_itertools import quantify
from pydantic import BaseModel, ValidationError, conint, constr, validator
from adventofcode2020.utils import (
DataName,
fetch_input_data_if_not_exists,
pattern_extract_all,
print_call,
read,
submit,
)
REQUIRED_FIELDS = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
class PassportA(BaseModel):
    """Part A passport: required fields only need to be PRESENT.

    pydantic raises ValidationError when any required field is missing;
    cid (country id) defaults to "" and is therefore optional.
    """
    byr: str
    iyr: str
    eyr: str
    hgt: str
    hcl: str
    ecl: str
    pid: str
    cid: str = ""
class PassportB(BaseModel):
    """Part B passport: every field must also pass value validation."""
    byr: conint(ge=1920, le=2002)  # birth year
    iyr: conint(ge=2010, le=2020)  # issue year
    eyr: conint(ge=2020, le=2030)  # expiration year
    # FIX: raw strings below -- "\d" in a plain literal is an invalid
    # escape sequence (DeprecationWarning today, SyntaxError in future
    # Python versions); r"..." is byte-for-byte the same pattern.
    hgt: constr(regex=r"^\d+(?:cm|in)")
    hcl: constr(regex=r"^#[a-z0-9]{6}$")
    ecl: Literal["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
    pid: constr(regex=r"^\d{9}$")
    cid: str = ""  # country id: optional, ignored
    @validator("hgt")
    def validate_height(cls, height):
        """Reject heights outside the unit-specific range.

        NOTE(review): AoC 2020 day 4 specifies 150-193cm and 59-76in;
        the bounds here (195 / 75) differ -- confirm against the puzzle.
        """
        num, unit = int(height[:-2]), height[-2:]
        if unit == "cm" and (num < 150 or num > 195):
            raise ValueError
        if unit == "in" and (num < 59 or num > 75):
            raise ValueError
        return height
def _validate_with_model(model, data):
    """Return True iff *data* passes validation for the pydantic *model*."""
    try:
        model(**data)
    except ValidationError:
        return False
    return True
@print_call
def solve_part1(file_name):
    """Count passports that merely have all required fields (PassportA)."""
    # Passports are blank-line separated blocks in the input file.
    passports = read(file_name).split("\n\n")
    return quantify(map(_parse_passport, passports), pred=partial(_validate_with_model, PassportA))
@print_call
def solve_part2(file_name):
    """Count passports whose fields also pass value validation (PassportB)."""
    passports = read(file_name).split("\n\n")
    return quantify(map(_parse_passport, passports), pred=partial(_validate_with_model, PassportB))
def _parse_passport(passport):
    """Parse one passport block into a dict of three-letter keys to values."""
    # NOTE(review): "\S" should be in a raw string (r"...") to avoid the
    # invalid-escape-sequence warning.
    return dict(pattern_extract_all("([a-z]{3}):(\S+)", passport, str, str))
if __name__ == "__main__":
    # Fetch the puzzle input on first run, sanity-check on the samples,
    # then solve the real input and submit each part.
    fetch_input_data_if_not_exists()
    part = "a"
    solve_part1(DataName.SAMPLE_1)
    answer = solve_part1(DataName.PUZZLE)
    submit(answer, part)
    part = "b"
    solve_part2(DataName.SAMPLE_1)
    solve_part2(DataName.SAMPLE_2)
    solve_part2(DataName.SAMPLE_3)
    answer = solve_part2(DataName.PUZZLE)
    submit(answer, part)
| true
|
2660cd2892c54dcb6abe99a15beb21ca9b5ff816
|
Python
|
dcs4cop/xcube
|
/test/test_mixins.py
|
UTF-8
| 3,628
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from test.mixins import AlmostEqualDeepMixin
class AlmostEqualDeepMixinTest(unittest.TestCase, AlmostEqualDeepMixin):
    """Tests for AlmostEqualDeepMixin.assertAlmostEqualDeep.

    The assertion compares arbitrarily nested containers, treating floats
    as equal up to `places` decimal places; the first test below shows the
    default is 7 places (matching unittest.assertAlmostEqual).
    """
    def test_int_and_float_7_places_default(self):
        # 0.8e-8 rounds to 0 at 7 places; 0.8e-7 does not.
        self.assertAlmostEqualDeep(0, 0.8e-8)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(0, 0.8e-7)
    def test_int(self):
        self.assertAlmostEqualDeep(45, 45)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(45, 54)
    def test_str(self):
        self.assertAlmostEqualDeep("abc", "abc")
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep("abc", "Abc")
    def test_bool(self):
        self.assertAlmostEqualDeep(True, True)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(True, False)
    def test_set(self):
        expected = {'a', 1.1256, True}
        self.assertAlmostEqualDeep(expected, expected)
        self.assertAlmostEqualDeep(expected, {'a', 1.1256, True})
        with self.assertRaises(AssertionError):
            # We currently don't test sets
            self.assertAlmostEqualDeep(expected, {'a', 1.1251, True}, places=2)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, {'a', 1.1256, False})
    def test_dict(self):
        # Dict values are compared per key, with float tolerance.
        expected = {'a': 1.1256, 'b': 5}
        self.assertAlmostEqualDeep(expected, expected)
        self.assertAlmostEqualDeep(expected, {'a': 1.1256, 'b': 5})
        self.assertAlmostEqualDeep(expected, {'a': 1.1251, 'b': 5}, places=3)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, {'a': 1.1251, 'b': 5}, places=4)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, {'a': 1.1256, 'b': 6})
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, [1, 2, 3])
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, 3456)
    def test_list(self):
        # Lists compare element-wise; a tuple with the same items also passes.
        expected = ['a', 1.1256, True]
        self.assertAlmostEqualDeep(expected, expected)
        self.assertAlmostEqualDeep(expected, ['a', 1.1256, True])
        self.assertAlmostEqualDeep(expected, ('a', 1.1256, True))
        self.assertAlmostEqualDeep(expected, ['a', 1.1251, True], places=3)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, ['a', 1.1251, True], places=4)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, ['a', 1.1256, False], places=4)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, [1, 2, 3])
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, 3456)
    def test_list_dict_tuple(self):
        # Deeply nested mixture of list/dict/tuple with float tolerance.
        expected = [
            {'a': True, 'b': (1.1256, 45, True)},
            {'a': False, 'b': (2.1256, 46, False)}
        ]
        self.assertAlmostEqualDeep(expected, expected)
        self.assertAlmostEqualDeep(expected, [
            {'a': True, 'b': (1.1256, 45, True)},
            {'a': False, 'b': (2.1256, 46, False)}
        ])
        self.assertAlmostEqualDeep(expected, [
            {'a': True, 'b': (1.1251, 45, True)},
            {'a': False, 'b': (2.1259, 46, False)}
        ], places=3)
        with self.assertRaises(AssertionError):
            self.assertAlmostEqualDeep(expected, [
                {'a': True, 'b': (1.1251, 45, True)},
                {'a': False, 'b': (2.1259, 46, False)}
            ], places=4)
| true
|
b148c13a5210e95d91d0c2f5ff6799b5f66970e8
|
Python
|
DevlinaC/Testing_clustering
|
/plot_agglomerative_dendrogram.py
|
UTF-8
| 5,251
| 3
| 3
|
[] |
no_license
|
"""
=========================================
Plot Hierarchical Clustering Dendrogram
=========================================
This example plots the corresponding dendrogram of a hierarchical clustering
using Agglomerative Clustering and the dendrogram method available in scipy
The one in sklearn doesn't work!
"""
import itertools as itts
from pathlib import Path
from operator import itemgetter
from optparse import OptionParser, OptionValueError
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as ssd
import numpy as np
import pandas as pd
# make it fancy!
def fancy_dendrogram(*args, **kwargs):
    """Wrapper around scipy's dendrogram() that annotates merge heights.

    Extra keyword arguments (removed before forwarding to dendrogram):
      max_d          -- draw a horizontal cut-off line at this distance and
                        use it as the colour threshold if none was given
      annotate_above -- only annotate merges whose height exceeds this
    Returns the dict produced by scipy.cluster.hierarchy.dendrogram.
    """
    max_d = kwargs.pop('max_d', None)
    if max_d and 'color_threshold' not in kwargs:
        kwargs['color_threshold'] = max_d
    annotate_above = kwargs.pop('annotate_above', 0)
    ddata = dendrogram(*args, **kwargs)
    if not kwargs.get('no_plot', False):
        plt.title('Hierarchical Clustering Dendrogram (truncated)')
        plt.xlabel('sample index or (cluster size)')
        plt.ylabel('distance')
        for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):
            # x is the horizontal centre of the link; y its merge distance.
            x = 0.5 * sum(i[1:3])
            y = d[1]
            if y > annotate_above:
                plt.plot(x, y, 'o', c=c)
                plt.annotate("%.3g" % y, (x, y), xytext=(0, -5),
                             textcoords='offset points',
                             va='top', ha='center')
        if max_d:
            plt.axhline(y=max_d, c='k')
    return ddata
"""
def dist_matrix_to_1d(M):
A =[]
for ix, row in enumerate(M[:-1]):
for iy, val in enumerate(row[ix+1:], ix+1):
A.append(val)
return np.array(A)
"""
# Create linkage matrix and then plot the dendrogram
def plot_dendrogram(model, threshold):
    """Draw a truncated dendrogram for `model` with a cut line at `threshold`."""
    plt.title('Hierarchical Clustering Dendrogram')
    # Flat cluster labels at the cutoff -- computed exactly as before for
    # parity with the original, although the labels are not used below.
    ind = sch.fcluster(model, threshold, 'distance')
    fancy_dendrogram(model, truncate_mode='lastp', p=10, max_d=threshold)
    plt.show()
def _check_inputFile(option, opt_str, value, parser):
f_path = Path(value)
if not f_path.is_file():
raise OptionValueError(f"Cannot get {str(f_path)} file")
setattr(parser.values, option.dest, Path(f_path))
parser.values.saved_infile = True
def read_data(inFile) -> pd.DataFrame:
    """Parse a pairwise-similarity file into a symmetric pandas DataFrame.

    Each input line is "<id1> <id2> <value>".  The returned square matrix is
    indexed by the sorted ids, holds the given value for each pair (in both
    orientations) and 1.0 on the diagonal.
    """
    cells = {}

    def record(a, b, value):
        # one matrix cell; 'x'/'y' are filled in once all ids are known
        cells.setdefault(a, {})[b] = {'value': value, 'x': None, 'y': None}

    with open(inFile) as handle:
        for raw in handle:
            id1, id2, value = raw.strip().split()
            record(id1, id2, float(value))
            record(id2, id1, float(value))
            record(id1, id1, 1.0)
            record(id2, id2, 1.0)

    labels = sorted(cells.keys())
    for ix, row_id in enumerate(labels):
        for iy, col_id in enumerate(labels):
            cells[row_id][col_id].update(dict(x=ix, y=iy))

    by_column = itemgetter('y')
    rows = [[cell['value'] for cell in sorted(cells[k].values(), key=by_column)]
            for k in labels]
    return pd.DataFrame(rows, index=labels, columns=labels)
def build_distance_matrix(data: pd.DataFrame):
    """Convert a similarity matrix into a distance matrix.

    Each similarity s maps to the distance 1/s**2, and the diagonal is
    zeroed (self-distance).  Returns a plain numpy ndarray.

    Improvement over the original: np.vectorize runs a Python-level loop
    per element; the equivalent native numpy expression below is both
    faster and clearer.  (A zero similarity now yields inf instead of
    raising ZeroDivisionError -- an arguably more useful "infinitely far"
    result; the diagonal is overwritten either way.)
    """
    values = data.values.astype(float)
    data_out = 1.0 / np.square(values)  # elementwise 1/s^2, vectorized
    np.fill_diagonal(data_out, 0)
    return data_out
if __name__ == "__main__":
    # --- command line: -i/--input_file (validated by _check_inputFile) and
    # --- -c/--cutoff, the similarity cutoff used to cut the tree -----------
    options_parser = OptionParser()
    options_parser.add_option("-i", "--input_file",
                              dest="input_file", type='str',
                              help="input FILE",
                              metavar="FILE",
                              action='callback',
                              callback=_check_inputFile)
    options_parser.add_option("-c", "--cutoff",
                              dest="cutoff", type='float',
                              help="clustering cutoff",
                              metavar="FLOAT")
    (options, args) = options_parser.parse_args()
    in_file = Path(options.input_file)
    cutoff = float(options.cutoff)
    # Similarity matrix -> distance matrix (distance = 1/similarity^2), so
    # the similarity cutoff maps to the distance threshold 1/cutoff^2.
    data = read_data(in_file)
    dist = build_distance_matrix(data)
    threshold = 1/(cutoff*cutoff)
    # Condense the square distance matrix and cluster with complete linkage.
    data1D = ssd.squareform(dist)
    dist_test = linkage(data1D, method='complete') # Complete linkage
    # maximum linkage uses
    # the maximum distances between all observations of the two sets
    plot_dendrogram(dist_test,threshold)
| true
|
7578e6fc6ac68abcfdeb56e9d2a2442a9a8a8f41
|
Python
|
rishabhgit0608/FaceRecognition
|
/face_detection.py
|
UTF-8
| 509
| 2.640625
| 3
|
[] |
no_license
|
import cv2

# Webcam face detector: draws a green box around every detected face and
# quits when the user presses "q".
capture = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")

while True:
    grabbed, frame = capture.read()
    if not grabbed:
        continue
    # detectMultiScale(image, scaleFactor, minNeighbors)
    for (x, y, w, h) in detector.detectMultiScale(frame, 1.3, 5):
        # rectangle(img, top-left, bottom-right, BGR color, thickness)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
| true
|
f13b41ddfa3e946147e6b5e06b15fe56102d6283
|
Python
|
MoCuishle28/blogproject-LearnDjango
|
/comments/models.py
|
UTF-8
| 932
| 2.8125
| 3
|
[] |
no_license
|
from django.db import models
from django.utils.six import python_2_unicode_compatible
# Create your models here.
# The python_2_unicode_compatible decorator is used for Python 2 compatibility.
# NOTE(review): django.utils.six was removed in Django 3.0 -- confirm the
# pinned Django version still ships it.
@python_2_unicode_compatible
class Comment(models.Model):
    """
    Stores the commenting user's name, email and url (personal website).
    The text the user posts is stored in the text field; created_time
    records when the comment was made.
    A comment is associated with one article (Post).  Since a comment can
    belong to only one article while an article can have many comments,
    this is a one-to-many relation, hence the ForeignKey below.
    """
    name = models.CharField(max_length=100)          # commenter's display name
    email = models.EmailField(max_length=255)
    url = models.URLField(blank=True)                # optional personal website
    text = models.TextField()                        # the comment body
    created_time = models.DateTimeField(auto_now_add=True)
    # NOTE(review): on_delete is mandatory for ForeignKey from Django 2.0 on;
    # this style implies Django < 2.0.
    post = models.ForeignKey('blog.Post')
    def __str__(self):
        # Show the first 20 characters of the comment (e.g. in the admin).
        return self.text[:20]
| true
|
748b94d4c533dd90895386657bc3c9acceeca617
|
Python
|
natanaelfelix/Estudos
|
/Sessão 4/Desafio POO/classes/contacorrente.py
|
UTF-8
| 529
| 2.859375
| 3
|
[] |
no_license
|
from conta import Conta
class ContaCorrente(Conta):
    """Checking account: a Conta with an overdraft limit (limite).

    Bug fixes vs. the original:
      * __init__ passed the undefined name `conta` to the base initializer,
        which raised NameError on every instantiation; it now passes `nconta`.
      * `limite` was accepted but never stored, so `saque` crashed with
        AttributeError on self.limite; it is now stored.
    """

    def __init__(self, agencia, nconta, saldo, limite=1000):
        super().__init__(agencia, nconta, saldo)
        self.agencia = agencia
        self.nconta = nconta
        self.saldo = saldo
        self.limite = limite  # overdraft allowance on top of the balance

    def saque(self, valor):
        """Withdraw `valor`; allowed down to -limite, else refuse."""
        if (self.saldo + self.limite) < valor:
            print('Saldo insuficiente')
            return
        self.saldo = self.saldo - valor
        self.detalhes()  # inherited from Conta

    def depositar(self, valor):
        """Deposit `valor` into the balance."""
        self.saldo = self.saldo + valor
| true
|
38daf30c715781252b9c3396cade106d9b271b77
|
Python
|
merlin2181/Coffee-Machine
|
/Problems/Small scale/task.py
|
UTF-8
| 164
| 3.453125
| 3
|
[] |
no_license
|
# Read an initial number, then more numbers until a "." line; print the
# smallest value seen (including the initial one).
minimum = float(input())
for token in iter(input, "."):
    candidate = float(token)
    if candidate < minimum:
        minimum = candidate
print(minimum)
| true
|
98e30eec27a2709fff295f516c86a4b684957513
|
Python
|
kyithar/class
|
/dataset_clean/python/ratingcsv_reader.py
|
UTF-8
| 1,104
| 2.96875
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
def ratingreader(condition_tmp):
    """Bucket MovieLens rating timestamps into hourly/daily/yearly periods.

    Reads dataset_original/ratings.csv, adds a column named after the chosen
    granularity holding ceil(timestamp / seconds-per-bucket), sorts by it,
    drops the raw timestamp column and writes the result to
    dataset_processed/rating_processed.csv.

    condition_tmp -- 'hourly' or 'daily'; any other value falls back to
                     yearly, matching the original if/elif/else behaviour.

    Improvements over the original: the three near-identical branches are
    collapsed into one code path via a divisor lookup; the deprecated
    positional axis argument drop('timestamp', 1) is replaced with the
    explicit drop(columns=...); and the typo in the final status message
    ("succuseffuly") is fixed.
    """
    # seconds per bucket for each supported granularity
    bucket_seconds = {'hourly': 3600, 'daily': 86400}
    condition = condition_tmp
    divisor = bucket_seconds.get(condition, 31536000)  # default: yearly

    ##### load rating.csv ##########
    print("Start cleaning 'ratings.csv'")
    df_rate = pd.read_csv('dataset_original/ratings.csv', encoding='utf-8')

    df_rate[condition] = np.ceil(df_rate['timestamp'] / divisor)
    df_rate = df_rate.sort_values([condition], ascending=True).drop(columns='timestamp')

    #### Save as CSV #####
    df_rate.to_csv('dataset_processed/rating_processed.csv')
    del df_rate
    print("rating_process.csv is successfully saved in 'dataset_processed/'")
| true
|
b8f4b96b88405d50eb51987b5cfd18cbf0621428
|
Python
|
thuliosenechal/Codewars
|
/Counting Duplicates Letters/test_duplicated_letters.py
|
UTF-8
| 1,166
| 3.4375
| 3
|
[] |
no_license
|
import unittest
from duplicated_letters import duplicate_count
class TestDuplicatedLetters(unittest.TestCase):
    """Unit tests for duplicate_count: the number of characters that occur
    more than once in the input (case-insensitive)."""

    def test_case_a(self):
        self.assertEqual(duplicate_count(''), 0)

    def test_case_b(self):
        self.assertEqual(duplicate_count('abcde'), 0)

    def test_case_c(self):
        self.assertEqual(duplicate_count('abcdeaa'), 1)

    def test_case_d(self):
        self.assertEqual(duplicate_count('abcdeaB'), 2)

    def test_case_e(self):
        self.assertEqual(duplicate_count('Indivisibilities'), 2)

    def test_case_f(self):
        self.assertEqual(duplicate_count('abcdefghijklmnopqrstuvwxyz'), 0)

    def test_case_g(self):
        self.assertEqual(duplicate_count('abcdefghijklmnopqrstuvwxyz' + 'aaAb'), 2)

    def test_case_h(self):
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        self.assertEqual(duplicate_count(alphabet + alphabet), 26)
if __name__ == '__main__':
unittest.main()
| true
|
7405c5c43fa1fc005a248818e00a49747a4b361e
|
Python
|
github/codeql
|
/python/ql/test/experimental/dataflow/typetracking/test.py
|
UTF-8
| 4,831
| 2.96875
| 3
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
permissive
|
# NOTE(review): this is a static-analysis (type-tracking) test fixture, not
# runnable code.  The trailing "# $..." comments are inline expectations
# consumed by the analysis test harness, and names such as `tracked` and
# `do_stuff` are deliberately undefined -- do not "fix" them and do not edit
# the marker comments.
def get_tracked():
    x = tracked # $tracked
    return x # $tracked

def use_tracked_foo(x): # $tracked
    do_stuff(x) # $tracked

def foo():
    use_tracked_foo(
        get_tracked() # $tracked
    )

def use_tracked_bar(x): # $tracked
    do_stuff(x) # $tracked

def bar():
    x = get_tracked() # $tracked
    use_tracked_bar(x) # $tracked

def use_tracked_baz(x): # $tracked
    do_stuff(x) # $tracked

def baz():
    x = tracked # $tracked
    use_tracked_baz(x) # $tracked

def id(x): # $tracked
    return x # $tracked

def use_tracked_quux(x): # $ MISSING: tracked
    do_stuff(y) # call after return -- not tracked in here.

def quux():
    x = tracked # $tracked
    y = id(x) # $tracked
    use_tracked_quux(y) # not tracked out of call to id.

g = None

def write_g(x): # $tracked
    global g
    g = x # $tracked

def use_g():
    do_stuff(g) # $tracked

def global_var_write_test():
    x = tracked # $tracked
    write_g(x) # $tracked
    use_g()

def test_import():
    import mymodule
    mymodule.x # $tracked
    y = mymodule.func() # $tracked
    y # $tracked
    mymodule.z # $tracked

def to_inner_scope():
    x = tracked # $tracked
    def foo():
        y = x # $ tracked
        return y # $ tracked
    also_x = foo() # $ tracked
    print(also_x) # $ tracked

def from_parameter_default():
    x_alias = tracked # $tracked
    def outer(x=tracked): # $tracked
        print(x) # $tracked
        def inner():
            print(x) # $ tracked
            print(x_alias) # $tracked
        return x # $tracked
    also_x = outer() # $tracked
    print(also_x) # $tracked

# ------------------------------------------------------------------------------
# Function decorator
# ------------------------------------------------------------------------------

def my_decorator(func):
    # This part doesn't make any sense in a normal decorator, but just shows how we
    # handle type-tracking
    func() # $tracked
    def wrapper():
        print("before function call")
        val = func() # $ MISSING: tracked
        print("after function call")
        return val # $ MISSING: tracked
    return wrapper

@my_decorator
def get_tracked2():
    return tracked # $tracked

@my_decorator
def unrelated_func():
    return "foo"

def use_funcs_with_decorators():
    x = get_tracked2() # $ tracked
    y = unrelated_func()

# ------------------------------------------------------------------------------

def expects_int(x): # $int
    do_int_stuff(x) # $int

def expects_string(x): # $str
    do_string_stuff(x) # $str

def redefine_test():
    x = int(5) # $int
    expects_int(x) # $int
    x = str("Hello") # $str
    expects_string(x) # $str

# ------------------------------------------------------------------------------
# Tracking of self in methods
# ------------------------------------------------------------------------------

class Foo(object):
    def meth1(self):
        do_stuff(self)
    def meth2(self): # $ tracked_self
        do_stuff(self) # $ tracked_self
    def meth3(self): # $ tracked_self
        do_stuff(self) # $ tracked_self

class Bar(Foo):
    def meth1(self): # $ tracked_self
        do_stuff(self) # $ tracked_self
    def meth2(self):
        do_stuff(self)
    def meth3(self):
        do_stuff(self)
    def track_self(self): # $ tracked_self
        self.meth1() # $ tracked_self
        super().meth2()
        super(Bar, self).meth3() # $ tracked_self

# ------------------------------------------------------------------------------
# Tracking of attribute lookup after "long" import chain
# ------------------------------------------------------------------------------

def test_long_import_chain():
    import foo.bar
    foo.baz
    x = foo.bar.baz # $ tracked_foo_bar_baz
    do_stuff(x) # $ tracked_foo_bar_baz
    class Example(foo.bar.baz): # $ tracked_foo_bar_baz
        pass

def test_long_import_chain_full_path():
    from foo.bar import baz # $ tracked_foo_bar_baz
    x = baz # $ tracked_foo_bar_baz
    do_stuff(x) # $ tracked_foo_bar_baz

# ------------------------------------------------------------------------------
# Global variable to method body flow
# ------------------------------------------------------------------------------

some_value = get_tracked() # $ tracked
other_value = get_tracked() # $ tracked

print(some_value) # $ tracked
print(other_value) # $ tracked

class MyClass(object):
    # Since we define some_value method on the class, flow for some_value gets blocked
    # into the methods
    def some_value(self):
        print(some_value) # $ tracked
        print(other_value) # $ tracked
    def other_name(self):
        print(some_value) # $ tracked
        print(other_value) # $ tracked
    def with_global_modifier(self):
        global some_value
        print(some_value) # $ tracked
| true
|
b0faaa978fd6117a596abc563a2e8296777af5ff
|
Python
|
sampaioveiga/python_network_tutorial
|
/examples/03/client.py
|
UTF-8
| 245
| 2.625
| 3
|
[] |
no_license
|
import socket

# Simple TCP client: greet the server and print its reply.
SERVER_ADDRESS = ("192.168.116.1", 12345)

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    client.connect(SERVER_ADDRESS)
    client.send(b"Hi from client!")
    reply = client.recv(4096)
    print(reply.decode())
finally:
    client.close()
| true
|
03d2379418e38349224af6b10e844edf9b682118
|
Python
|
makovalab-psu/NoiseCancellingRepeatFinder
|
/reproduce/map_onto_simulated_reads.py
|
UTF-8
| 9,352
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
"""
Map intervals from a "genome" to positions on simulated reads.
"""
from sys import argv,stdin,stdout,stderr,exit
from gzip import open as gzip_open
def usage(s=None):
    """Exit the program, printing the usage message.

    If s is None only the usage text is shown; otherwise s is printed as an
    error line above it.  Either way the process terminates via exit(),
    which raises SystemExit.

    Fixes vs. the original help text: "actaully" -> "actually" and the
    garbled "<chrom> start> <end>" -> "<chrom> <start> <end>".
    """
    message = """
usage: cat <intervals_file> | map_onto_simulated_reads [options]
--cigars=<filename> (mandatory) cigar strings file (an input file)
--stranded=<columns> (cumulative) input columns which are presumed to have
strand info (+ or -) as their final character;
<columns> is a comma-separated list
--truncate truncate mappings at the end of reads; actually
mappings are always truncated, but by default when
this happens it is indicated as "<0" or ">1000"
(assuming the read length is 1000); this option
just removes the "<" and ">" indicators.
--sortby:reads sort output by read positions on the genome
(by default, output is interval-by-interval in the
order intervals are read)
--separators print separating lines between different intervals
or reads
Given a genome from which simulated reads were sampled by simulate_reads_v4,
and the corresponding cigars file, map intervals (or positions) from the genome
to the corresponding positions on the simulated reads.
Intervals are one per line, <chrom> <start> <end>. Coordinates are zero-based
and exclude the end position. Any additional columns are copied to the
output."""
    if (s == None): exit (message)
    else:           exit ("%s\n%s" % (s,message))
def main():
    """Command-line driver: map stdin intervals onto simulated reads.

    Requires --cigars=<file> (parsed by read_cigars); interval lines arrive
    on stdin (parsed by read_intervals).  One output line is printed per
    (interval, overlapping read) pair.  NOTE(review): this file uses
    Python 2 print statements and the file() builtin -- Python 2 only.
    """
    global debug

    # parse the command line
    cigarFilename = None
    strandedTags = None
    indicateTruncation = True
    sortByReads = False
    separateIntervals = False
    debug = []

    for arg in argv[1:]:
        if ("=" in arg):
            argVal = arg.split("=",1)[1]

        if (arg.startswith("--cigars=")) or (arg.startswith("--cigar=")):
            cigarFilename = argVal
        elif (arg.startswith("--stranded=")):
            if (strandedTags == None): strandedTags = set()
            for col in map(int,argVal.split(",")):
                strandedTags.add(col)
        elif (arg == "--truncate"):
            indicateTruncation = False
        elif (arg == "--sortby:reads"):
            sortByReads = True
        elif (arg == "--separators"):
            separateIntervals = True
        elif (arg == "--debug"):
            debug += ["debug"]
        elif (arg.startswith("--debug=")):
            debug += argVal.split(",")
        elif (arg.startswith("--")):
            usage("unrecognized option: %s" % arg)
        else:
            usage("unrecognized option: %s" % arg)

    if (cigarFilename == None):
        usage("you need to give me a cigar strings file")

    if (strandedTags != None):
        # convert 1-based input column numbers into indexes into the tag
        # list (tags are interval columns 4+, i.e. fields[3:] in read_intervals)
        strandedTags = [col-4 for col in strandedTags]
        strandedTags.sort()

    # read the cigar strings
    # NOTE(review): file() is the Python 2 open(); py2-only code path.
    if (cigarFilename.endswith(".gz")) or (cigarFilename.endswith(".gzip")):
        cigarF = gzip_open(cigarFilename,"rt")
    else:
        cigarF = file(cigarFilename,"rt")

    # index the reads by chromosome; also remember each read's genomic span
    chroms = []
    chromToCigars = {}
    nameToGenome = {}
    for (name,chrom,strand,gStart,gEnd,cigar) in read_cigars(cigarF):
        if (strand == "-"): cigar = cigar[::-1] # reverse order of cigar ops
        (gLength,rLength) = cigar_lengths(cigar)
        if (chrom not in chromToCigars):
            chroms += [chrom]
            chromToCigars[chrom] = []
        chromToCigars[chrom] += [(gStart,gEnd,gLength,name,strand,rLength,cigar)]
        nameToGenome[name] = (gStart,gEnd)
    cigarF.close()

    # sort each chromosome's reads by genomic start (map_interval relies on
    # this ordering to stop scanning early)
    for chrom in chromToCigars:
        chromToCigars[chrom].sort()

    # process the intervals
    oppositeStrand = {"+":"-", "-":"+"}
    chromToMappings = {}
    for chrom in chroms: chromToMappings[chrom] = []

    haveOutput = False
    for (chrom,gStart,gEnd,tags) in read_intervals(stdin):
        if (chrom not in chromToCigars): continue
        cigarInfo = chromToCigars[chrom]
        needSeparator = separateIntervals
        for (name,strand,rStart,rEnd) in map_interval(cigarInfo,gStart,gEnd):
            # truncated endpoints arrive as ("<",0)/(">",len) tuples; either
            # render them as "<0"/">len" or strip the indicator (--truncate)
            if (indicateTruncation):
                if (type(rStart) == tuple): rStart = "%s%d" % rStart
                if (type(rEnd) == tuple): rEnd = "%s%d" % rEnd
            else:
                if (type(rStart) == tuple): rStart = rStart[1]
                if (type(rEnd) == tuple): rEnd = rEnd[1]
            if (tags == None):
                oTags = ""
            else:
                oTags = list(tags)
                # flip the +/- suffix on stranded tag columns for reverse reads
                if (strand == "-") and (strandedTags != None):
                    for col in strandedTags:
                        if (col >= len(oTags)): continue
                        tailCh = oTags[col][-1]
                        if (tailCh in "+-"):
                            oTags[col] = oTags[col][:-1] + oppositeStrand[tailCh]
                oTags = "\t" + "\t".join(oTags)
            # NOTE(review): when tags are present oTags already begins with a
            # tab, so the "\t%s" here yields an empty field before the tags --
            # confirm this double tab is intended.
            mappedStr = "%s\t%d\t%d\t%s\t%s\t%s\t%s" \
                      % (chrom,gStart,gEnd,name,rStart,rEnd,oTags)
            if (sortByReads):
                # defer printing; key by the read's genomic position
                (s,e) = nameToGenome[name]
                chromToMappings[chrom] += [(s,e,rStart,rEnd,mappedStr)]
            else:
                if (haveOutput) and (needSeparator): print ""
                print mappedStr
                haveOutput = True
                needSeparator = False

    # deferred output for --sortby:reads, grouped per chromosome
    if (sortByReads):
        haveOutput = False
        for chrom in chroms:
            chromToMappings[chrom].sort()
            needSeparator = separateIntervals
            for (_,_,_,_,mappedStr) in chromToMappings[chrom]:
                if (haveOutput) and (needSeparator): print ""
                print mappedStr
                haveOutput = True
                needSeparator = False
needSeparator = False
def map_interval(cigarInfo,gStart,gEnd):
    """Map the genomic interval gStart..gEnd onto every read overlapping it.

    cigarInfo is one chromosome's list of
    (start,end,gLength,name,strand,rLength,cigar) tuples, sorted by start.
    Yields (name,strand,rStart,rEnd) with rStart/rEnd in read coordinates;
    an endpoint truncated by the read boundary is reported as the tuple
    ("<",0) or (">",readLength).
    """
    # Note that insertions are nucleotides that are in the read but not in the
    # genome; deletions are nucleotides that are in the genome but not in the
    # read
    # Also note that the cigar operations list has already been reversed if
    # the read was pulled from revcomp of genome
    if ("mapping" in debug):
        print >>stderr, "mapping %d..%d" % (gStart,gEnd)
    for (s,e,gLength,name,strand,rLength,cigar) in cigarInfo:
        if (e <= gStart): continue   # read ends before the interval starts
        if (s >= gEnd): break        # reads are sorted by start; no more overlaps
        if ("mapping" in debug):
            print >>stderr, " intersects with %d..%d" % (s,e)
        # walk the cigar, advancing genome (gPos) and read (rPos) cursors in
        # lockstep until both interval endpoints are located
        (gPos,rPos) = (s,0)
        rStart = rEnd = None
        if (gStart < s): rStart = ("<",0)   # interval begins before the read
        for (count,op) in cigar:
            if ("mapping" in debug):
                print >>stderr, " g=%d r=%d" % (gPos,rPos)
            if (rStart == None) and (gPos == gStart):
                rStart = rPos
            if (gPos == gEnd):
                rEnd = rPos
                break
            if (op == "I"):
                rPos += count            # insertion: read only
            elif (op == "D"):
                gPos += count            # deletion: genome only
            else: # if (op == "M"):
                # an endpoint can fall strictly inside a match block
                if (rStart == None) and (gPos < gStart < gPos+count):
                    rStart = rPos + gStart-gPos
                if (gPos < gEnd < gPos+count):
                    rEnd = rPos + gEnd-gPos
                    break
                gPos += count
                rPos += count
        if ("mapping" in debug):
            print >>stderr, " g=%d r=%d" % (gPos,rPos)
        if (rEnd == None):
            # ran off the end of the read before reaching gEnd
            assert (rPos == rLength)
            if (gPos == gEnd): rEnd = rPos
            else: rEnd = (">",rPos)
        assert (rStart != None)
        # if read was pulled from revcomp of genome, we need to reverse
        # the positions here
        if (strand == "-"):
            if (type(rEnd) == tuple): reverseStart = ("<",0)
            else: reverseStart = rLength-rEnd
            if (type(rStart) == tuple): reverseEnd = (">",rLength)
            else: reverseEnd = rLength-rStart
            (rStart,rEnd) = (reverseStart,reverseEnd)
        yield (name,strand,rStart,rEnd)
def read_intervals(f):
    """Yield (chrom,start,end,tags) tuples parsed from an intervals file.

    Blank lines and #-comment lines are skipped.  tags is None when a line
    has exactly three columns, otherwise the list of extra columns.  A line
    with too few columns, non-numeric coordinates, or end < start triggers
    an assertion failure.
    """
    for lineNumber, rawLine in enumerate(f, start=1):
        text = rawLine.strip()
        if (text == "") or (text.startswith("#")):
            continue
        fields = text.split()
        assert (len(fields) >= 3), \
               "not enough fields at line %d (%d, expected at least %d)" \
             % (lineNumber,len(fields),3)
        try:
            chrom = fields[0]
            start = int(fields[1])
            end = int(fields[2])
            if (end < start): raise ValueError
            extra = None if (len(fields) == 3) else fields[3:]
        except ValueError:
            assert (False), "bad line (%d): %s" % (lineNumber,text)
        yield (chrom,start,end,extra)
def read_cigars(f):
    """Yield (name,chrom,strand,gStart,gEnd,cigar) records from a cigars file.

    Each input line is: name chrom[+-] start end cigar...; a trailing +/- on
    the chromosome name gives the strand (default "+").  The cigar text is
    parsed into a list of (count,op) pairs via cigar_ops.  Blank lines and
    #-comment lines are skipped; malformed lines trigger assertion failures.

    Fix vs. the original: the too-few-fields error message said "lest"
    instead of "least".
    """
    lineNumber = 0
    for line in f:
        lineNumber += 1
        line = line.strip()
        if (line == ""): continue
        if (line.startswith("#")): continue
        fields = line.split()
        assert (len(fields) >= 5), \
               "not enough fields at line %d (%d, expected at least %d)" \
             % (lineNumber,len(fields),5)
        try:
            name = fields[0]
            chrom = fields[1]
            gStart = int(fields[2])
            gEnd = int(fields[3])
            cigar = " ".join(fields[4:])
            if (gEnd < gStart): raise ValueError
        except ValueError:
            assert (False), "bad cigar line (%d): %s" % (lineNumber,line)
        # a +/- suffix on the chromosome name encodes the strand
        if (chrom.endswith("+")): (chrom,strand) = (chrom[:-1],"+")
        elif (chrom.endswith("-")): (chrom,strand) = (chrom[:-1],"-")
        else: strand = "+"
        try:
            cigar = list(cigar_ops(cigar))
        except ValueError:
            assert (False), "unparsable cigar string (line %d): %s" % (lineNumber,cigar)
        yield (name,chrom,strand,gStart,gEnd,cigar)
# cigar_ops--
# Convert cigar string into a series of (count,op) pairs
def cigar_ops(cigar):
    """Yield (count,op) pairs from a cigar string such as "10M 2I 3D".

    Counts may not have leading zeros; whitespace is permitted only before a
    count.  Any other character, or a trailing count with no operation,
    raises ValueError.
    """
    pending = ""
    for ch in cigar:
        if ch in "MID":
            if pending == "":
                raise ValueError      # operation with no count
            yield (int(pending), ch)
            pending = ""
        elif ch in "0123456789":
            pending += ch
            if pending == "0":        # forbid leading-zero counts
                raise ValueError
        elif pending == "" and ch in " \t":
            continue                  # whitespace allowed before a count
        else:
            raise ValueError
    if pending != "":                 # count with no trailing operation
        raise ValueError
if (count != ""): raise ValueError
# cigar_lengths--
def cigar_lengths(cigar):
    """Return (genomeLength,readLength) implied by a list of (count,op) pairs.

    M advances both sequences, I only the read, D only the genome (any
    unrecognized op is treated like M, matching the original's else branch).
    """
    genomeLen = 0
    readLen = 0
    for (n, op) in cigar:
        if op != "I":
            genomeLen += n   # M and D consume genome
        if op != "D":
            readLen += n     # M and I consume read
    return (genomeLen,readLen)
if __name__ == "__main__": main()
| true
|
09ca13387e545e18ed7448776f25ed1bf0382915
|
Python
|
harkiratbehl/PyGM
|
/src/codegen.py
|
UTF-8
| 21,493
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
"""Generate Assembly code from 3AC"""
import sys
from code import Code, ThreeAddressCode
from registers import Registers
from symbol_table import SymbolTable
three_addr_code = ThreeAddressCode()
assembly_code = Code()
registers = Registers()
input_file = ''
start_main = 0
start_param = 0
def convert_tac(ThreeAddressCode):
    """Read the parser's three-address code and prepare it for codegen.

    Copies every instruction into the module-level `three_addr_code`,
    prefixing each with its 1-based line number, validates the format
    (exactly 5 fields, numeric line number) and records basic-block leader
    positions for branch/jump/label instructions.  Returns the populated
    three_addr_code, or -1 on a malformed instruction.

    NOTE(review): the parameter shadows the imported ThreeAddressCode class.
    """
    for i in range(ThreeAddressCode.length()):
        three_addr_instr = ThreeAddressCode.code[i]
        three_addr_instr = [str(i+1)] + three_addr_instr  # prepend line number
        three_addr_code.add_line(three_addr_instr)
        # every instruction must be [line, op, dest, src1, src2]
        if len(three_addr_instr) != 5:
            print("Incorrect size for the following instruction: ")
            print(three_addr_instr)
            return -1
        if three_addr_instr[0] == '':
            print("Line number not given in the following instruction: ")
            print(three_addr_instr)
            return -1
        import re
        # the line-number field must be all digits
        if re.search(r'\D', three_addr_instr[0]) != None:
            print("Invalid line number given in the following instruction: ")
            print(three_addr_instr)
            return -1
        # conditional branches start a new basic block
        leader_generating_if_instr = []
        leader_generating_if_instr += ['ifgotoeq']
        leader_generating_if_instr += ['ifgotoneq']
        leader_generating_if_instr += ['ifgotolt']
        leader_generating_if_instr += ['ifgotolteq']
        leader_generating_if_instr += ['ifgotogt']
        leader_generating_if_instr += ['ifgotogteq']
        if three_addr_instr[1] in leader_generating_if_instr:
            three_addr_code.add_leader(three_addr_code.length())
        leader_generating_other_instr = ['label']
        # NOTE(review): this condition re-tests leader_generating_if_instr;
        # it looks like it was meant to test leader_generating_other_instr
        # (i.e. 'label') -- confirm against the intended leader rules.
        if three_addr_instr[1] in leader_generating_if_instr:
            three_addr_code.add_leader(three_addr_code.length()-1)
        leader_generating_other_instr = []
        leader_generating_other_instr += ['goto']
        leader_generating_other_instr += ['break']
        leader_generating_other_instr += ['continue']
        if three_addr_instr[1] in leader_generating_other_instr:
            three_addr_code.add_leader(three_addr_code.length())
    # leaders were appended out of order; sort them numerically
    three_addr_code.leaders = sorted(three_addr_code.leaders, key=int)
    return three_addr_code
def generate_assembly(three_addr_code,var_list,symbol_table):
    """Translate the whole three-address program into MIPS assembly.

    Emits a .data section (newline constant plus one entry per variable in
    var_list: word, asciiz string, or .space array), then a .text section by
    running translator() on each instruction.  Returns the module-level
    assembly_code object, or None if any instruction fails to translate.
    """
    # data region to handle global data and constants
    assembly_code.add_line('\t.data')
    assembly_code.add_line('newline:\t.asciiz "\n"')
    # declaring variables from list of variables
    for var in var_list:
        if var.size == []:
            if var.parameters == []:
                # plain scalar -> one word
                line = '%s:\t.word 0' % var.name
            else:
                # string constant -> asciiz with its literal value
                line = var.name + ':\t.asciiz \"' + var.parameters[0].name + '\"'
        else:
            # array -> 4 bytes per element
            space = 4*int(var.size)
            line = var.name + ':\t.space 0:' + str(space)
        assembly_code.add_line(line)
    # functions
    assembly_code.add_line('\t.text')
    global start_main
    translator_error = 0  # NOTE(review): set on failure but never read after
    for i in range(three_addr_code.length()):
        # if i in three_addr_code.leaders:
        #     assembly_code.add_line('Line_' + str(i + 1) + ':')
        three_addr_instr = three_addr_code.code[i]
        if translator(three_addr_instr,symbol_table) != 0:
            translator_error = 1
            print('Unidentified operator in this Three Address Instruction: ' + ", ".join(three_addr_instr))
            # bare return -> caller receives None on translation failure
            return
    # close main with an exit syscall if it was the last function emitted
    if start_main == 1:
        assembly_code.add_line('li $v0, 10')
        assembly_code.add_line('syscall')
    return assembly_code
def translator(three_addr_instr,symbol_table):
"""Translate Three Address Instruction to Assembly"""
global start_main
global start_param
# parse three_addr_instr
line_no = int(three_addr_instr[0])
instr_op = three_addr_instr[1]
dest = three_addr_instr[2]
src1 = three_addr_instr[3]
src2 = three_addr_instr[4]
reg_temp1, reg_idx1, reg_idx2, reg_idx3 = '', '', '', ''
if '[' in dest:
d1 = dest.find('[')
d2 = dest.find(']')
var1 = dest[:d1]
idx1 = dest[d1+1:d2]
assembly_code.add_line('sub $sp, $sp, 4')
reg_idx1 = registers.get_register(idx1, symbol_table, line_no, assembly_code)
assembly_code.add_line('sll ' + reg_idx1 + ', ' + reg_idx1 + ', 2')
reg_temp2 = registers.get_register('0', symbol_table, line_no, assembly_code)
assembly_code.add_line('la ' + reg_temp1 + ', ' + var1)
if '[' in src1:
d1 = src1.find('[')
d2 = src1.find(']')
var2 = src1[:d1]
idx2 = src1[d1+1:d2]
reg_idx2 = registers.get_register(idx2, symbol_table, line_no, assembly_code)
assembly_code.add_line('sll ' + reg_idx2 + ', ' + reg_idx2 + ', 2')
reg_temp2 = registers.get_register('0', symbol_table, line_no, assembly_code)
assembly_code.add_line('la ' + reg_temp2 + ', ' + var2)
assembly_code.add_line('lw ' + reg_idx2 + ', ' + reg_idx2 + ', (' + reg_temp2 + ')')
if '[' in src2:
d1 = src2.find('[')
d2 = src2.find(']')
var3 = src2[:d1]
idx3 = src2[d1+1:d2]
reg_idx3 = registers.get_register(idx2, symbol_table, line_no, assembly_code)
assembly_code.add_line('sll ' + reg_idx3 + ', ' + reg_idx3 + ', 2')
reg_temp3 = registers.get_register('0', symbol_table, line_no, assembly_code)
assembly_code.add_line('la ' + reg_temp3 + ', ' + var3)
assembly_code.add_line('lw ' + reg_idx3 + ', ' + reg_idx3 + ', (' + reg_temp3 + ')')
#### if variable has [] then take that from memory location
if instr_op == 'stack_push':
assembly_code.add_line('sub $sp, $sp, 4')
assembly_code.add_line('sw $ra, ($sp)')
assembly_code.add_line('sub $sp, $sp, 4')
assembly_code.add_line('sw $fp, ($sp)')
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'label':
assembly_code.add_line(dest + ':')
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'goto':
assembly_code.add_line('j ' + dest)
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'break':
assembly_code.add_line('j ' + dest)
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'continue':
assembly_code.add_line('j ' + dest)
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'print_str':
assembly_code.add_line('la $a0, ' + dest)
assembly_code.add_line('li $v0, 4')
assembly_code.add_line('syscall')
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'func':
if dest == 'scope_0_main':
assembly_code.add_line('main:')
start_main = 1
if dest != 'scope_0_main' and start_main == 1:
assembly_code.add_line('li $v0, 10')
assembly_code.add_line('syscall')
start_main = 0
if dest != 'scope_0_main':
assembly_code.add_line('func_' + dest + ':')
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'call':
assembly_code.add_line('jal func_' + dest)
for r in reversed(registers.registers):
assembly_code.add_line('lw ' + r + ', ($sp)')
assembly_code.add_line('addiu $sp, $sp, 4')
start_param = 0
# if reg_idx1 != '':
# assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
# Using reg_dest
if dest != '':
reg_dest = registers.get_register(dest, symbol_table, line_no, assembly_code)
if instr_op == 'putparam':
if start_param == 0:
for r in registers.registers:
assembly_code.add_line('sub $sp, $sp, 4')
assembly_code.add_line('sw ' + r + ', ($sp)')
start_param = 1
assembly_code.add_line('sub $sp, $sp, 4')
assembly_code.add_line('sw ' + reg_dest + ', ($sp)')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'getparam':
assembly_code.add_line('lw ' + reg_dest + ', ($sp)')
assembly_code.add_line('addiu $sp, $sp, 4')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'print_int':
assembly_code.add_line('li $v0, 1')
assembly_code.add_line('move $a0, ' + reg_dest)
assembly_code.add_line('syscall')
assembly_code.add_line('li $v0, 4')
assembly_code.add_line('la $a0, newline')
assembly_code.add_line('syscall')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'scan_int':
assembly_code.add_line('li $v0, 5')
assembly_code.add_line('syscall')
assembly_code.add_line('move ' + reg_dest + ', $v0')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'return':
if dest != '':
assembly_code.add_line('move $v0, ' + reg_dest)
assembly_code.add_line('lw $fp, ($sp)')
assembly_code.add_line('addiu $sp, $sp, 4')
assembly_code.add_line('lw $ra, ($sp)')
assembly_code.add_line('addiu $sp, $sp, 4')
assembly_code.add_line('jr $ra')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'return_value':
assembly_code.add_line('move ' + reg_dest + ', $v0')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'get_val_at_add':
# write src1 to address dest
assembly_code.add_line('la ' + reg_dest + ', ' + src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
# Using reg_src1
if src1 != '':
if reg_idx2 == '':
reg_src1 = registers.get_register(src1, symbol_table, line_no, assembly_code)
else:
reg_src1 = reg_idx2
if instr_op == '+=':
assembly_code.add_line('add ' + reg_dest + ', ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '-=':
assembly_code.add_line('sub ' + reg_dest + ', ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '*=':
assembly_code.add_line('mult ' + reg_dest + ', ' + reg_src1)
assembly_code.add_line('mflo ' + reg_dest)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '/=':
assembly_code.add_line('div ' + reg_dest + ', ' + reg_src1)
assembly_code.add_line('mflo ' + reg_dest) # HI
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '%=':
assembly_code.add_line('div ' + reg_dest + ', ' + reg_src1)
assembly_code.add_line('mfhi ' + reg_dest) # HI
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '<<=':
assembly_code.add_line('sllv ' + reg_dest + ', ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '>>=':
assembly_code.add_line('srlv ' + reg_dest + ', ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '=':
assembly_code.add_line('move ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == ':=':
assembly_code.add_line('move ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'ifgotoeq':
assembly_code.add_line('beq ' + reg_dest + ', ' + reg_src1 + ', ' + src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'ifgotoneq':
assembly_code.add_line('bne ' + reg_dest + ', ' + reg_src1 + ', ' + src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'ifgotolt':
assembly_code.add_line('blt ' + reg_dest + ', ' + reg_src1 + ', ' + src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'ifgotolteq':
assembly_code.add_line('ble ' + reg_dest + ', ' + reg_src1 + ', ' + src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'ifgotogt':
assembly_code.add_line('bgt ' + reg_dest + ', ' + reg_src1 + ', ' + src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'ifgotogteq':
assembly_code.add_line('bge ' + reg_dest + ', ' + reg_src1 + ', ' + src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'read_add':
# read from src1 address to dest
# Similar to * operator or dereferencing
assembly_code.add_line('lw ' + reg_dest + ', ' + '0(' + reg_src1+ ')')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == 'write_add':
# write src1 to address dest
assembly_code.add_line('sw ' + reg_dest + ', ' + '0(' + reg_src1+ ')')
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
# Using reg_src2
if src2 != '':
reg_src2 = registers.get_register(src2, symbol_table, line_no, assembly_code)
if reg_idx3 == '':
reg_src2 = registers.get_register(src2, symbol_table, line_no, assembly_code)
else:
reg_src2 = reg_idx3
if instr_op == '+':
if src2 != '':
assembly_code.add_line('add ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
else:
assembly_code.add_line('move ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '-':
if src2 != '':
assembly_code.add_line('sub ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
else:
src1 = '-' + src1
reg_src1 = registers.get_register(src1, symbol_table, line_no, assembly_code)
assembly_code.add_line('move ' + reg_dest + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '*':
assembly_code.add_line('mult ' + reg_src1 + ', ' + reg_src2)
assembly_code.add_line('mflo ' + reg_dest) # LO 32
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '/':
assembly_code.add_line('div ' + reg_src1 + ', ' + reg_src2)
assembly_code.add_line('mflo ' + reg_dest) # LO
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '%':
assembly_code.add_line('div ' + reg_src1 + ', ' + reg_src2)
assembly_code.add_line('mfhi ' + reg_dest) # HI
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '&&':
assembly_code.add_line('and ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '||':
assembly_code.add_line('or ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '^':
assembly_code.add_line('xor ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '!=':
assembly_code.add_line('sne ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '<=':
assembly_code.add_line('sle ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '>=':
assembly_code.add_line('sge ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '==':
assembly_code.add_line('seq ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '<':
assembly_code.add_line('slt ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '>':
assembly_code.add_line('sgt ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '!':
assembly_code.add_line('li ' + reg_src1 + ', 1')
assembly_code.add_line('xor ' + reg_dest + ', ' + reg_src2 + ', ' + reg_src1)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '<<':
assembly_code.add_line('sllv ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
if instr_op == '>>':
assembly_code.add_line('srlv ' + reg_dest + ', ' + reg_src1 + ', ' + reg_src2)
if reg_idx1 != '':
assembly_code.add_line('sw ' + reg_dest + ', ' + reg_idx1 + '(' + reg_temp1 + ')')
return 0
return 1
def codegen():
    """Placeholder entry point for code generation; currently does nothing.

    The actual translation work is driven from the ``__main__`` block in
    this module (read_input_file() followed by generate_assembly()).
    """
if __name__ == '__main__':
    # Command-line entry point: translate a three-address-code (.ir) file
    # into MIPS assembly and print it to stdout.
    if len(sys.argv) != 2:
        print('Usage: python /path/to/codegen.py /path/to/3AC.ir')
        sys.exit(1)
    input_file = sys.argv[1] # file containing the three address code
    import os
    if os.path.isfile(input_file) is False:
        print('Input file ' + input_file + ' does not exist')
        sys.exit(1)
    # Convention in this module: helpers return 0 on success, 1 on failure.
    if read_input_file() == 0:
        if generate_assembly() == 0:
            # if start_main == 1:
            #     assembly_code.add_line('li $v0, 10')
            #     assembly_code.add_line('syscall')
            assembly_code.print_code()
        else:
            print('Unidentified operator in the above line(s)')
| true
|
ad6902ea54982790803383cd7621c88d9f84f0e7
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2595/59140/256525.py
|
UTF-8
| 116
| 3.3125
| 3
|
[] |
no_license
|
# For each of T test cases read "a b" and print b ** (a - 1).
T = int(input())
for _ in range(T):
    tokens = input().split(" ")
    print(pow(int(tokens[1]), int(tokens[0]) - 1))
| true
|
d06064d3da1c87d3522491c6036bf605de67397b
|
Python
|
kukosek/dotfiles
|
/.config/polybar/forest/scripts/playerctl-display.py
|
UTF-8
| 863
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/python3.9
import subprocess
import textwrap
# Ask playerctl whether anything is playing; a non-zero exit (no player
# available) is treated the same as "not playing".
try:
    status = subprocess.check_output(
        ['playerctl', 'status'], stderr=subprocess.STDOUT
    ).decode('utf-8').strip()
    is_playing = status == "Playing"
except subprocess.CalledProcessError:
    is_playing = False

if not is_playing:
    print("Offline")
else:
    # Pull title and artist out of the metadata dump; each line looks like
    # "<source> <key> <value...>", so split at most twice.
    wanted = {"xesam:title": "", "xesam:artist": ""}
    raw = subprocess.check_output(['playerctl', 'metadata']).decode('utf-8')
    for entry in raw.splitlines():
        _, key, value = entry.split(' ', 2)
        key = key.strip()
        if key in wanted:
            wanted[key] = value.strip()
    track_title = wanted["xesam:title"]
    track_author = wanted["xesam:artist"]
    # Long strings get truncated to fit the bar; short ones show both fields.
    if len(track_title + track_author) > 30:
        print(textwrap.shorten(track_title, width=45, placeholder="..."))
    else:
        print(track_author + " - " + track_title)
| true
|
fc3fb1a13c2bbcc81e0b581dc7fb33390b5ba100
|
Python
|
guozhengpku/package_function
|
/multi_match.py
|
UTF-8
| 1,929
| 3.140625
| 3
|
[] |
no_license
|
#-*-coding:utf-8-*-
from gensim.models import Word2Vec
class PrefixQuery(object):
    """Multi-pattern string matcher backed by a character-prefix table.

    Every prefix of an inserted word is stored in ``prefix_dict`` with
    value 0; the complete word gets value 1 and its label is kept in
    ``label_dict``.  ``query_words`` scans a text and reports the longest
    inserted words found, tagged "<word>_<label>".
    """
    def __init__(self, words={}):
        # NOTE(review): mutable default argument; harmless here because
        # `words` is only iterated, never mutated or stored.
        self.prefix_dict = {}
        self.label_dict = {}
        self._init(words)
        # print(self.prefix_dict)
    def _init(self, words):
        # Seed the table from an initial iterable of words (no labels).
        for word in words:
            self.insert(word)
    def insert(self, word, label=''):
        """Add *word* (optionally tagged with *label*) to the matcher."""
        len_w = len(word)
        for i in range(1, len_w + 1):
            w = word[:i]
            if i == len_w:
                # Complete word: mark terminal and remember its label.
                self.prefix_dict[w] = 1
                if label:
                    self.label_dict[w] = label
            elif w not in self.prefix_dict:
                # Proper prefix: record it without clobbering a previously
                # inserted complete word (value 1).
                self.prefix_dict[w] = 0
    def query_words(self, text):
        """Return [[word_label, (start, end)], ...] for matches in *text*.

        Greedy: extends while the substring is still a known prefix and
        emits the longest complete word seen from each start position.
        NOTE(review): the outer loop stops at len(text) - 1, so a match
        starting at the final character is never attempted — confirm
        whether that is intended.
        """
        t_len = len(text)
        matchs = []
        i = 0
        result_word = ""
        result_index = -1
        while i < t_len - 1:
            for j in range(i + 1, t_len + 1):
                word = text[i:j]
                if word not in self.prefix_dict:
                    # Dead end: flush any complete word found so far and
                    # resume scanning just past it.
                    if result_word:
                        matchs.append([result_word + '_' + self.label_dict[result_word], (result_index, result_index + len(result_word) - 1)])
                        i = result_index + len(result_word) - 1
                        result_word = ""
                        result_index = -1
                    break
                if self.prefix_dict.get(word) == 1:
                    # Remember the longest complete word seen from i.
                    result_word = word
                    result_index = i
                    # print(result_word + '\t' + str(result_index))
            i += 1
        if result_word:
            matchs.append([result_word + '_' + self.label_dict[result_word], (result_index, result_index + len(result_word) - 1)])
        #print(matchs)
        return matchs
#query = PrefixQuery()
#query.insert(u"心态浮躁", "label")
#print(query.prefix_dict)
#text = u"珍格格心态浮躁"
#res = query.query_words(text)
#print(res)
| true
|
a8220535bb24b0dc33c6427095c0da4fdebc331f
|
Python
|
IzaakPrats/beginning_python
|
/basic_calculator.py
|
UTF-8
| 421
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
# Read the two operands and the operator for a one-shot calculation.
x = int(input("Enter your first number: "))
y = int(input("Input your second number: "))
o = str(input("Enter the operator: "))
def add(x, y):
    """Return the sum of x and y."""
    return x + y


def subtract(x, y):
    """Return x minus y."""
    return x - y


def multiply(x, y):
    """Return the product of x and y."""
    return x * y


def divide(x, y):
    """Return x divided by y (true division; raises ZeroDivisionError if y == 0)."""
    return x / y
# Fix: the original compared strings with `is` (identity), which only works
# by accident of CPython small-string interning and can silently fail for
# strings read from stdin.  Compare by value with `==` instead; `elif`
# makes the mutually-exclusive dispatch explicit.
if o == "+":
    print(str(add(x, y)))
elif o == "-":
    print(str(subtract(x, y)))
elif o == "*":
    print(str(multiply(x, y)))
elif o == "/":
    print(str(divide(x, y)))
| true
|
a4261837adfa810db7075e3e0dcfd3e626d45a59
|
Python
|
frc-5160-the-chargers/lebot-james
|
/components/loader.py
|
UTF-8
| 658
| 2.53125
| 3
|
[] |
no_license
|
from ctre import WPI_TalonSRX
from modes import LoaderPosition
class LoaderConstants:
    """Tuning constants for the loader mechanism."""
    # Motor output (fraction of full power) while raising the loader.
    K_POWER_UP = 0.25
    # Motor output while lowering the loader (negative = reverse direction).
    K_POWER_DOWN = -0.25
class Loader:
    """Drives the loader motor according to the requested position.

    ``loader_motor`` is injected by the robot framework (magicbot-style
    attribute annotation); ``execute`` is called every control loop.
    """
    loader_motor: WPI_TalonSRX

    def __init__(self):
        self.reset()

    def reset(self):
        """Return to the disabled, loader-up state."""
        self.enabled = False
        self.position = LoaderPosition.UP

    def execute(self):
        """Apply the motor output matching the current state (one tick)."""
        if not self.enabled:
            # Disabled: actively command zero output.
            self.loader_motor.set(0)
        elif self.position == LoaderPosition.UP:
            self.loader_motor.set(LoaderConstants.K_POWER_UP)
        elif self.position == LoaderPosition.DOWN:
            self.loader_motor.set(LoaderConstants.K_POWER_DOWN)
| true
|
494e02127ec143eccde29e937390a7dce62e4700
|
Python
|
inergoul/boostcamp_peer_session
|
/coding_test/pass_42576_완주하지못한선수/solution_LJH.py
|
UTF-8
| 379
| 3.015625
| 3
|
[] |
no_license
|
# https://programmers.co.kr/learn/courses/30/lessons/42576
def solution(participant, completion):
    """Return the single participant who did not finish the race.

    `completion` is `participant` minus exactly one runner (names may
    repeat), so the multiset difference contains exactly one name.
    Counter-based difference runs in O(n), replacing the original
    O(n log n) double sort.
    """
    leftover = Counter(participant) - Counter(completion)
    # Exactly one key remains by the problem's guarantee.
    return next(iter(leftover))
| true
|
1e2a900c4a7c45c5511e46a4bda663d516df6c53
|
Python
|
BejeweledMe/TReNDS-Neuroimaging
|
/3D_CNN/losses.py
|
UTF-8
| 727
| 2.75
| 3
|
[] |
no_license
|
from torch import nn
import torch
class W_NAE(nn.Module):
    """Weighted normalized absolute error.

    For each output column j: sum_i |target_ij - output_ij| / sum_i target_ij,
    weighted by w[j] and summed over columns.  Expects output and target of
    identical shape (batch, n_targets).
    """

    def __init__(self, w=(0.3, 0.175, 0.175, 0.175, 0.175)):
        # Fix: the original used a mutable list as the default argument;
        # a tuple avoids the shared-mutable-default pitfall and is
        # accepted by FloatTensor just the same.
        super().__init__()
        self.w = torch.FloatTensor(w)

    def forward(self, output, target):
        """Return the scalar weighted NAE loss for a batch."""
        if target.size() != output.size():
            raise ValueError('Target size ({}) must be the same as input size ({})'
                             .format(target.size(), output.size()))
        return torch.sum(
            self.w * torch.sum(torch.abs(target - output), axis=0) / torch.sum(target, axis=0)
        )
# Registry of available criteria, keyed by short name.
losses_dict = {
    'mae': nn.L1Loss(),
    'w_nae': W_NAE(),
}


def loss_function(loss):
    """Return the criterion registered under *loss* ('mae' or 'w_nae')."""
    return losses_dict[loss]
return criterion
| true
|
f489f7b3b754f21afcf5ea657301d1a880d3acc0
|
Python
|
spacetime314/python3_ios
|
/extraPackages/matplotlib-3.0.2/examples/text_labels_and_annotations/fonts_demo.py
|
UTF-8
| 2,915
| 3.3125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
==================================
Fonts demo (object-oriented style)
==================================
Set font properties using setters.
See :doc:`fonts_demo_kw` to achieve the same effect using kwargs.
"""
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
plt.subplot(111, facecolor='w')
font0 = FontProperties()
# Shared text-alignment kwargs for every label placed below.
alignment = {'horizontalalignment': 'center', 'verticalalignment': 'baseline'}
# Show family options
families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
# font1 is the large column-header font; each column reuses it.
font1 = font0.copy()
font1.set_size('large')
t = plt.text(-0.8, 0.9, 'family', fontproperties=font1,
             **alignment)
# y-positions for up to seven rows in each column.
yp = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
for k, family in enumerate(families):
    font = font0.copy()
    font.set_family(family)
    t = plt.text(-0.8, yp[k], family, fontproperties=font,
                 **alignment)
# Show style options
styles = ['normal', 'italic', 'oblique']
t = plt.text(-0.4, 0.9, 'style', fontproperties=font1,
             **alignment)
for k, style in enumerate(styles):
    font = font0.copy()
    font.set_family('sans-serif')
    font.set_style(style)
    t = plt.text(-0.4, yp[k], style, fontproperties=font,
                 **alignment)
# Show variant options
variants = ['normal', 'small-caps']
t = plt.text(0.0, 0.9, 'variant', fontproperties=font1,
             **alignment)
for k, variant in enumerate(variants):
    font = font0.copy()
    font.set_family('serif')
    font.set_variant(variant)
    t = plt.text(0.0, yp[k], variant, fontproperties=font,
                 **alignment)
# Show weight options
weights = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = plt.text(0.4, 0.9, 'weight', fontproperties=font1,
             **alignment)
for k, weight in enumerate(weights):
    font = font0.copy()
    font.set_weight(weight)
    t = plt.text(0.4, yp[k], weight, fontproperties=font,
                 **alignment)
# Show size options
sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
         'x-large', 'xx-large']
t = plt.text(0.8, 0.9, 'size', fontproperties=font1,
             **alignment)
for k, size in enumerate(sizes):
    font = font0.copy()
    font.set_size(size)
    t = plt.text(0.8, yp[k], size, fontproperties=font,
                 **alignment)
# Show bold italic: three samples at increasing sizes.
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-small')
t = plt.text(-0.4, 0.1, 'bold italic', fontproperties=font,
             **alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('medium')
t = plt.text(-0.4, 0.2, 'bold italic', fontproperties=font,
             **alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-large')
t = plt.text(-0.4, 0.3, 'bold italic', fontproperties=font,
             **alignment)
plt.axis([-1, 1, 0, 1])
plt.show()
| true
|
d54c9d5846b1d912d71851d4fae4f588bc999461
|
Python
|
russellmacshane/learn_day_01may20
|
/utils/utils.py
|
UTF-8
| 605
| 2.65625
| 3
|
[] |
no_license
|
class Utils:
    """Helpers for shaping API summary payloads and persisting confirmed counts."""

    # Case-count fields copied verbatim from the upstream summary payload.
    _SUMMARY_FIELDS = (
        'NewConfirmed', 'TotalConfirmed',
        'NewDeaths', 'TotalDeaths',
        'NewRecovered', 'TotalRecovered',
    )

    def summary_output(self, json):
        """Return a dict holding only the six case-count fields of *json*."""
        return {field: json[field] for field in self._SUMMARY_FIELDS}

    def save_confirmed(self, confirmed):
        """Append *confirmed* as one line to the stats file."""
        # 'with' guarantees the handle is closed even if the write raises.
        with open("assets/stats", "a") as f:
            f.write(f'{str(confirmed)}\n')

    def all_confirmed(self):
        """Return the full contents of the stats file.

        Fix: the original opened the file and never closed it (leaked
        handle); the context manager closes it deterministically.
        """
        with open("assets/stats", "r") as f:
            return f.read()
| true
|
f345e85a421862125d5d0a758e4397c1ca4e9746
|
Python
|
Iigorsf/Python
|
/ex038.py
|
UTF-8
| 454
| 4.4375
| 4
|
[
"MIT"
] |
permissive
|
#Escreva um programa que leia dois números inteiros e compare-os, mostrando na tela uma mensagem:
#O primeiro valor é maior
#O Segundo valor é maior
#Não existe valor maior, os dois são iguais
# Read two integers and report which one is larger (or that they are equal).
first = int(input("Digite um número: "))
second = int(input("Digite outro número: "))
if first == second:
    print("Não existe valor maior, os dois são iguais")
elif first > second:
    print("O primeiro valor é maior")
else:
    print("O segundo valor é maior")
| true
|
346809e14ab97f0ce532728a204caec1afde2556
|
Python
|
kimnakyeong/changeToAWS
|
/Dr.Foody/scraper/so.py
|
UTF-8
| 2,006
| 3.09375
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
LIMIT = 50  # NOTE(review): defined but not referenced in this module — confirm before removing.
# Stack Overflow job search: keyword "python", sorted by newest ("i").
URL = f"https://stackoverflow.com/jobs?q=python&sort=i"
def get_last_page():
    """Fetch the first results page and return the last page number
    shown in the pagination widget."""
    result = requests.get(URL)
    soup = BeautifulSoup(result.text,"html.parser")
    pages = soup.find("div", {"class":"s-pagination"}).find_all("a")
    # The final anchor is "next"; the one before it carries the last page number.
    last_page = pages[-2].get_text(strip=True)
    return int(last_page)
def extract_job(html): # html = one "-job" result div (BeautifulSoup tag)
    """Return {title, company, location, link} for a single job card."""
    title = html.find("div", {"class":"grid--cell fl1"}).find("h2").find("a")["title"]
    # The h3 holds exactly two direct <span> children: company then location.
    company, location = html.find("h3", {
        "class": "fc-black-700"
    }).find_all("span", recursive=False)
    company=company.get_text(strip=True)
    location=location.get_text(strip=True)
    job_id = html["data-jobid"]
    return {
        "title": title,
        "company": company,
        "location": location,
        "link": f"https://stackoverflow.com/jobs/{job_id}"
    }
# company, location = html.find("h3", { # 2개의 item이 있다는 것을 아니까 2개의 변수에 담음. unpacking 방법
# "class": "fc-black-700"
# }).find_all(
# "span", recursive=False) #recursive = False : 좀 더 깊은 레벨로 가지말고, 첫번째 depth만 가져오기
# company = company.get_text(strip=True).strip("-") #strip=True
# location = location.get_text(strip=True)
# return {"title": title}
def extract_jobs(last_page):
    """Scrape every results page (1..last_page) and return the job dicts."""
    jobs = []
    for page in range(last_page):
        # NOTE(review): the message says "Indeed" but this module scrapes
        # Stack Overflow; the runtime string is left untouched here.
        print(f"Scrapping Indeed: page {page}")
        result = requests.get(f"{URL}&pg={page+1}")
        soup = BeautifulSoup(result.text, "html.parser")
        results = soup.find_all("div", {"class":"-job"})
        for result in results:
            job = extract_job(result)
            jobs.append(job)
    return jobs
def get_jobs():
    """Entry point: return all scraped jobs across every results page."""
    last_page = get_last_page() # 1. find how many result pages exist
    # One request has to be sent per page.
    jobs = extract_jobs(last_page) # 2. request pages 1 through last_page
    return jobs
| true
|
4a8f665e808f0190d3ea45747b784689d04bd86d
|
Python
|
ace-racer/ContextualRecommender
|
/modeling/tag_generation/TagGeneratorBase.py
|
UTF-8
| 2,540
| 2.625
| 3
|
[] |
no_license
|
import math
import pandas as pd
import numpy as np
import os
import configurations
import constants
from base_operations import base_operations
class TagGeneratorBase(base_operations):
    """Base class for stream tag generators.

    Loads per-stream HTML content from the stream-details CSV and writes
    the resulting stream-to-tag mapping file; subclasses override
    generate_tags() with the actual tagging strategy.
    """
    def __init__(self):
        # String pandas produces for missing cells once str()-ified; used
        # below to skip rows that have no HTML content.
        self._nan = "nan"
    def get_stream_details(self):
        """Read the stream-details CSV and return {stream_id: concatenated
        HTML content}; also fills self._stream_id_stream_title_dict.

        NOTE(review): returns None implicitly when the CSV read yields
        None — callers should handle that case.
        """
        print("Reading the stream details...")
        complete_stream_details_df = pd.read_csv(configurations.COMPLETE_STREAM_DETAILS_LOCATION, encoding="ISO-8859-1")
        if complete_stream_details_df is not None:
            complete_stream_details_dict = {}
            self._stream_id_stream_title_dict = {}
            for _, row in complete_stream_details_df.iterrows():
                stream_id = str(row["DECKID"])
                stream_title = str(row["DECKNAME"])
                row_content = str(row["HTML_CONTENT"])
                # TODO: add the card title and the module name to the content on which the tags can be generated
                card_title =str(row["CARDTITLE"])
                module_name = str(row["MODULENAME"])
                if row_content and self._nan not in row_content:
                    # if the stream ID already exists in the dictionary
                    if complete_stream_details_dict.get(stream_id):
                        # Same stream spread over several rows: concatenate.
                        existing_content = complete_stream_details_dict[stream_id]
                        new_content = existing_content + "\n" + row_content.strip()
                        complete_stream_details_dict[stream_id] = new_content
                    else:
                        complete_stream_details_dict[stream_id] = row_content.strip()
                    self._stream_id_stream_title_dict[stream_id] = stream_title
            return complete_stream_details_dict
    def create_stream_tag_mapping_file(self, stream_id_tag_list):
        """
        Creates the stream tag mapping file
        """
        # Each output row: "<tag>,<stream id>,<stream title>".
        output_content = ""
        if stream_id_tag_list and len(stream_id_tag_list) > 0:
            for stream_id_tag in stream_id_tag_list:
                output_content += str(stream_id_tag[1]) + "," + stream_id_tag[0] + "," + self._stream_id_stream_title_dict.get(stream_id_tag[0]) + "\n"
        with open(configurations.STREAM_TAG_MAPPING_FILE_LOCATION, "w", encoding = "ISO-8859-1") as fw:
            fw.writelines(output_content)
        print("Output tags written to file here: " + configurations.STREAM_TAG_MAPPING_FILE_LOCATION)
    def generate_tags(self):
        # Abstract hook: subclasses implement the actual tag generation.
        pass
| true
|
70de54b1cbc995d0cdbb9f8869a7f204dc12c467
|
Python
|
jean/reg
|
/reg/predicate.py
|
UTF-8
| 11,071
| 2.859375
| 3
|
[] |
no_license
|
from .sentinel import NOT_FOUND
import inspect
from .argextract import KeyExtractor, ClassKeyExtractor, NameKeyExtractor
from .error import RegistrationError
class Predicate(object):
    """A dispatch predicate.
    """
    def __init__(self, name, index, get_key=None, fallback=None,
                 default=None):
        """
        :param name: predicate name. This is used by
          :meth:`reg.Registry.register_function_by_name`.
        :param index: a function that constructs an index given
          a fallback argument; typically you supply either a :class:`KeyIndex`
          or :class:`ClassIndex`.
        :param get_key: optional :class:`KeyExtractor`.
        :param fallback: optional fallback value. The fallback of the
          most generic index for which no values could be
          found is used.
        :param default: optional predicate default. This is used by
          :meth:`.reg.Registry.register_function_by_name`, and supplies
          the value if it is not given explicitly.
        """
        self.name = name
        self.index = index
        self.fallback = fallback
        self.get_key = get_key
        self.default = default
    def create_index(self):
        # Instantiate a fresh index bound to this predicate's fallback.
        return self.index(self.fallback)
    def argnames(self):
        """argnames that this predicate needs to dispatch on.
        """
        if self.get_key is None:
            return set()
        return set(self.get_key.names)
    def key_by_predicate_name(self, d):
        # Look up this predicate's key in d, falling back to the default.
        return d.get(self.name, self.default)
def key_predicate(name, get_key=None, fallback=None, default=None):
    """Construct predicate indexed on any immutable value.

    :param name: predicate name.
    :param get_key: a :class:`KeyExtractor`. Should return key to dispatch on.
    :param fallback: a fallback value. By default is ``None``.
    :param default: optional default value.
    :returns: a :class:`Predicate`.
    """
    return Predicate(name, KeyIndex, get_key, fallback, default)
def class_predicate(name, get_key=None, fallback=None, default=None):
    """Construct predicate indexed on class.

    :param name: predicate name.
    :param get_key: a :class:`KeyExtractor`. Should return class to dispatch on.
    :param fallback: a fallback value. By default is ``None``.
    :param default: optional default value.
    :returns: a :class:`Predicate`.
    """
    return Predicate(name, ClassIndex, get_key, fallback, default)
def match_key(name, func, fallback=None, default=None):
    """Predicate that extracts immutable key according to func.

    :param name: predicate name.
    :param func: a function that takes arguments. These arguments are
      extracted from the arguments given to the dispatch function.
      This function should return what to dispatch on.
    :param fallback: the fallback value. By default it is ``None``.
    :param default: optional default value.
    :returns: a :class:`Predicate`.
    """
    return key_predicate(name, KeyExtractor(func), fallback, default)
def match_instance(name, func, fallback=None, default=None):
    """Predicate that extracts class of instance returned by func.

    :param name: predicate name.
    :param func: a function that takes arguments. These arguments are
      extracted from the arguments given to the dispatch function.
      This function should return an instance; dispatching is done
      on the class of that instance.
    :param fallback: the fallback value. By default it is ``None``.
    :param default: optional default value.
    :returns: a :class:`Predicate`.
    """
    return class_predicate(name, ClassKeyExtractor(func), fallback, default)
def match_argname(argname, fallback=None, default=None):
    """Predicate that extracts class of specified argument.

    :param argname: name of the argument to dispatch on - its class will
      be used for the dispatch. Also used as the predicate name.
    :param fallback: the fallback value. By default it is ``None``.
    :param default: optional default value.
    :returns: a :class:`Predicate`.
    """
    return class_predicate(argname, NameKeyExtractor(argname),
                           fallback, default)
def match_class(name, func, fallback=None, default=None):
    """Predicate that extracts class returned by func.

    :param name: predicate name.
    :param func: a function that takes arguments. These arguments are
      extracted from the arguments given to the dispatch function.
      This function should return a class; dispatching is done
      on this class.
    :param fallback: the fallback value. By default it is ``None``.
    :param default: optional default value.
    :returns: a :class:`Predicate`.
    """
    return class_predicate(name, KeyExtractor(func), fallback, default)
class MultiPredicate(object):
    """Compound predicate combining several sub-predicates.

    Keys become tuples with one entry per underlying predicate, and the
    index is a :class:`MultiIndex` over the sub-predicates' indexes.
    """
    def __init__(self, predicates):
        self.predicates = predicates
        self.predicate_names = set(
            [predicate.name for predicate in predicates])
    def create_index(self):
        return MultiIndex(self.predicates)
    def get_key(self, d):
        # Tuple of each sub-predicate's key, in predicate order.
        return tuple([predicate.get_key(d) for predicate in self.predicates])
    def argnames(self):
        # Union of the argument names every sub-predicate dispatches on.
        result = set()
        for predicate in self.predicates:
            result.update(predicate.argnames())
        return result
    def key_by_predicate_name(self, d):
        result = []
        for predicate in self.predicates:
            result.append(predicate.key_by_predicate_name(d))
        return tuple(result)
class Index(object):
    """Abstract interface implemented by :class:`KeyIndex`,
    :class:`ClassIndex` and :class:`MultiIndex`."""
    def add(self, key, value):
        raise NotImplementedError # pragma: nocoverage
    def get(self, key, default=None):
        raise NotImplementedError # pragma: nocoverage
    def permutations(self, key):
        raise NotImplementedError # pragma: nocoverage
    def fallback(self, key):
        raise NotImplementedError # pragma: nocoverage
class KeyIndex(object):
    """Index keyed on immutable values; a key's only permutation is itself."""

    def __init__(self, fallback=None):
        # self.d maps key -> set of registered values; self._fallback is
        # read directly by MultiIndex, so both names must stay stable.
        self.d = {}
        self._fallback = fallback

    def add(self, key, value):
        """Register *value* under *key*."""
        bucket = self.d.setdefault(key, set())
        bucket.add(value)

    def get(self, key, default=None):
        """Return the set of values registered for *key*, or *default*."""
        return self.d.get(key, default)

    def permutations(self, key):
        """Permutations for a simple immutable key.

        There is only a single permutation: the key itself.
        """
        yield key

    def fallback(self, key):
        """Return fallback if this index does not contain key.

        If index contains permutations of key, then ``NOT_FOUND``
        is returned.
        """
        # Goes through self.permutations so subclasses (ClassIndex) get
        # their expanded permutation set.
        if any(k in self.d for k in self.permutations(key)):
            return NOT_FOUND
        return self._fallback
class ClassIndex(KeyIndex):
    def permutations(self, key):
        """Permutations for class key.

        Returns class and its bases in mro order. If a classic class in
        Python 2, smuggle in ``object`` as the base class anyway to make
        lookups consistent.
        """
        for class_ in inspect.getmro(key):
            yield class_
        # Classic-class mro ends without object; append it so object-level
        # registrations are always reachable.
        if class_ is not object:
            yield object
class MultiIndex(object):
    """Index over several predicates at once; keys are tuples with one
    entry per sub-index, and a lookup is the intersection of the per-index
    matches."""
    def __init__(self, predicates):
        self.predicates = predicates
        self.indexes = [predicate.create_index() for predicate in predicates]
    def add(self, keys, value):
        # Register value under each sub-key in its corresponding index.
        for index, key in zip(self.indexes, keys):
            index.add(key, value)
    def get(self, keys, default):
        matches = []
        # get all matching indexes first
        for index, key in zip(self.indexes, keys):
            match = index.get(key)
            # bail out early if None or any match has 0 items
            if not match:
                return default
            matches.append(match)
        # sort matches by length.
        # this allows cheaper intersection calls later
        matches.sort(key=lambda match: len(match))
        result = None
        for match in matches:
            if result is None:
                result = match
            else:
                result = result.intersection(match)
            # bail out early if there is nothing left
            if not result:
                return default
        return result
    def permutations(self, key):
        # Cartesian product of each sub-index's permutations, first index
        # varying slowest (most specific combinations first).
        return multipredicate_permutations(self.indexes, key)
    def fallback(self, keys):
        result = None
        for index, key in zip(self.indexes, keys):
            for k in index.permutations(key):
                match = index.get(k)
                if match:
                    break
            else:
                # no matching permutation for this key, so this is the fallback
                return index._fallback
            if result is None:
                result = match
            else:
                result = result.intersection(match)
            # as soon as the intersection becomes empty, we have a failed
            # match
            if not result:
                return index._fallback
        # if all predicates match, then we don't find a fallback
        return NOT_FOUND
class PredicateRegistry(object):
    """Registry mapping predicate keys to registered values through an
    index built from the predicate."""
    def __init__(self, predicate):
        self.known_keys = set()
        self.predicate = predicate
        self.index = self.predicate.create_index()
    def register(self, key, value):
        """Register *value* under *key*; a duplicate key is an error."""
        if key in self.known_keys:
            raise RegistrationError(
                "Already have registration for key: %s" % (key,))
        self.index.add(key, value)
        self.known_keys.add(key)
    def key(self, d):
        return self.predicate.get_key(d)
    def key_by_predicate_name(self, d):
        return self.predicate.key_by_predicate_name(d)
    def argnames(self):
        return self.predicate.argnames()
    def component(self, key):
        # Most specific matching registration, or None.
        return next(self.all(key), None)
    def fallback(self, key):
        return self.index.fallback(key)
    def all(self, key):
        # Yield registrations from most to least specific permutation.
        for p in self.index.permutations(key):
            result = self.index.get(p, NOT_FOUND)
            if result is not NOT_FOUND:
                yield tuple(result)[0]
class SingleValueRegistry(object):
    """Degenerate registry used when dispatch involves no predicates.

    Exactly one value may be registered; every lookup then returns it.
    """

    def __init__(self):
        self.value = None

    def register(self, key, value):
        """Store *value*; registering a second time is an error."""
        if self.value is None:
            self.value = value
            return
        raise RegistrationError(
            "Already have registration for key: %s" % (key,))

    def key(self, d):
        # With no predicates the dispatch key is always the empty tuple.
        return ()

    def key_by_predicate_name(self, d):
        return ()

    def argnames(self):
        # No predicates means no dispatch arguments.
        return set()

    def component(self, key):
        return self.value

    def fallback(self, key):
        return None

    def all(self, key):
        yield self.value
# XXX transform to non-recursive version
# use # http://blog.moertel.com/posts/2013-05-14-recursive-to-iterative-2.html
def multipredicate_permutations(indexes, keys):
    """Yield tuples combining the permutations of each key.

    The first key's permutations vary slowest, so combinations come out in
    most-specific-first order (matching each index's own ordering).
    Assumes ``keys`` is non-empty.
    """
    first = keys[0]
    rest = keys[1:]
    first_index = indexes[0]
    rest_indexes = indexes[1:]
    if not rest:
        # Base case: a single index/key pair.
        for permutation in first_index.permutations(first):
            yield (permutation,)
        return
    for permutation in first_index.permutations(first):
        for rest_permutation in multipredicate_permutations(
                rest_indexes, rest):
            yield (permutation,) + rest_permutation
| true
|
84f35f95dbe1404546f5723eee9072e3783611c7
|
Python
|
Nicolas810/Programacion2-clase30-03
|
/Ejecicio2-clase3.py
|
UTF-8
| 93
| 3.359375
| 3
|
[] |
no_license
|
# Print every integer from 0 up to and including the user's age.
edad = int(input("Ingrese su edad:"))
for i in range(edad + 1):
    # range() drives the counter itself; the original's `i = 1` seed and
    # `i = i + 1` inside the loop were dead code (overwritten each
    # iteration by the for statement) and have been removed.
    print(i)
| true
|
c1c9b5d4ceebcdb459b4809c632391e77e934806
|
Python
|
drbrhbym/III_Python_class
|
/1218Demo/rps2.py
|
UTF-8
| 315
| 3.6875
| 4
|
[] |
no_license
|
import random
# Four-choice rock-paper-scissors variant: each choice beats the one
# directly "below" it modulo 4 (bug < chicken < tiger < stick < bug).
my = int(input("[0] 蟲 [1] 雞 [2] 老虎 [3] 棒子"))
# NOTE(review): the computer draws from 0..2 only, so it can never play
# trans[3] even though the prompt offers it — confirm whether
# randint(0, 3) was intended.
com = random.randint(0, 2)
trans = ["蟲", "雞", "老虎", "棒子"]
print("my:", trans[my])
print("com:", trans[com])
# You win when your pick is exactly one step above the computer's (mod 4).
if my == (com + 1 ) % 4:
    print("you win")
elif com == (my + 1) %4:
    print("you lose")
else:
    print("平手")
| true
|
f492f3f464bd5301255800d2e266bb513625c9d5
|
Python
|
hwngenius/leetcode
|
/hot_100/101.py
|
UTF-8
| 792
| 3.34375
| 3
|
[] |
no_license
|
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node with a value and optional left/right children."""
    def __init__(self, x):
        # x is the node's payload; children start empty.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def isSymmetric(self, root: "TreeNode") -> bool:
        """Return True if the tree is a mirror image of itself around its center."""
        def _mirror(a, b):
            # Two empty subtrees mirror each other.
            if a is None and b is None:
                return True
            # Exactly one empty subtree: not a mirror.  Fix: the original
            # fell through here and returned None implicitly; make the
            # False explicit so the method always returns a bool.
            if a is None or b is None:
                return False
            return (a.val == b.val
                    and _mirror(a.left, b.right)
                    and _mirror(a.right, b.left))

        return _mirror(root.left, root.right) if root else True
# 执行用时:
# 52 ms
# , 在所有 Python3 提交中击败了
# 15.45%
# 的用户
# 内存消耗:
# 14.9 MB
# , 在所有 Python3 提交中击败了
# 13.73%
# 的用户
| true
|
2b85f2c9333448eb6120bcda905545322ce1285d
|
Python
|
kangli-bionic/leetcode-1
|
/384.py
|
UTF-8
| 961
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# coding=utf-8
'''
学到了:
1、如何使用python中的lambda表达式: lambda 参数:操作(参数),lambda表达式就是一个函数而已,
多用于短的函数。lambda同样可以不带参数,就如此题中的那样
2、random.sample([list],length of samples)
'''
import random
class Solution(object):
    """Shuffle an array (LeetCode 384).

    Fix: the original bound `reset`/`shuffle` as lambdas in __init__,
    silently shadowing the empty stub methods defined below; this version
    implements the two methods directly so the class reads and behaves
    conventionally.
    """

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        # Keep a reference to the caller's list: reset() returns the
        # original configuration, exactly as the original lambda did.
        self.nums = nums

    def reset(self):
        """
        Resets the array to its original configuration and return it.
        :rtype: List[int]
        """
        return self.nums

    def shuffle(self):
        """
        Returns a random shuffling of the array.
        :rtype: List[int]
        """
        return random.sample(self.nums, len(self.nums))
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
| true
|
6be5a388888de8c4aa31e1b9c589716eb1da1245
|
Python
|
17mirinae/Python
|
/Python/DAYEA/수학2/소수 구하기.py
|
UTF-8
| 280
| 3.34375
| 3
|
[] |
no_license
|
import math
def prime(x):
    """Return True if x is prime (trial division up to sqrt(x))."""
    if x == 1:
        return False
    limit = int(math.sqrt(x)) + 1
    # all() short-circuits on the first divisor found, mirroring the
    # original early-return loop.
    return all(x % d for d in range(2, limit))
# Print every prime in the inclusive range [M, N], one per line.
M, N = map(int, input().split())
for j in range(M, N+1):
    if prime(j) == True:
        print(j)
| true
|
7eeaafb157d169508787479b7dec04e11aab1e7e
|
Python
|
bluesky/bluesky-kafka
|
/bluesky_kafka/tests/test_basic_consumer.py
|
UTF-8
| 2,841
| 2.78125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import pytest
from bluesky_kafka.consume import BasicConsumer
@pytest.mark.parametrize(
    "bootstrap_servers, consumer_config_bootstrap_servers",
    [
        ([], ""),
        (["localhost:9092"], "localhost:9092"),
        (["localhost:9091", "localhost:9092"], "localhost:9091,localhost:9092"),
    ],
)
def test_bootstrap_servers(bootstrap_servers, consumer_config_bootstrap_servers):
    """
    This test targets combining bootstrap servers specified
    with the `bootstrap_servers` parameter and in the `consumer_config`.
    """
    bluesky_consumer = BasicConsumer(
        topics=["abc"],
        bootstrap_servers=bootstrap_servers,
        group_id="abc",
        consumer_config={},
    )
    # The list form must be joined into Kafka's comma-separated string.
    assert (
        bluesky_consumer._consumer_config["bootstrap.servers"]
        == consumer_config_bootstrap_servers
    )
def test_bootstrap_servers_in_consumer_config():
    """
    A `consumer_config` containing the reserved `bootstrap.servers` key
    must be rejected with ValueError.
    """
    expected_message = (
        "do not specify 'bootstrap.servers' in consumer_config dictionary, "
        "use only the 'bootstrap_servers' parameter"
    )
    with pytest.raises(ValueError) as excinfo:
        BasicConsumer(
            topics=["abc"],
            bootstrap_servers=["localhost:9092"],
            group_id="abc",
            consumer_config={"bootstrap.servers": ""},
        )
    assert expected_message in str(excinfo.value)
def test_bootstrap_servers_not_list():
    """A plain string for `bootstrap_servers` must raise TypeError."""
    with pytest.raises(TypeError) as excinfo:
        BasicConsumer(
            topics=["abc"],
            bootstrap_servers="localhost:9092",
            group_id="abc",
            consumer_config={},
        )
    message = str(excinfo.value)
    assert "parameter `bootstrap_servers` must be a sequence of str, not str" in message
def test_bad_consumer_config():
    """A `consumer_config` containing `group.id` must raise ValueError."""
    expected_message = (
        "do not specify 'group.id' in consumer_config, use only the 'group_id' parameter"
    )
    with pytest.raises(ValueError) as excinfo:
        BasicConsumer(
            topics=["abc"],
            bootstrap_servers=["localhost:9092"],
            group_id="abc",
            consumer_config={
                "group.id": "raise an exception!",
            },
        )
    assert expected_message in str(excinfo.value)
def test_redact_password_from_str():
    """The str() of a consumer must mask the `sasl.password` value."""
    consumer = BasicConsumer(
        topics=["abc"],
        bootstrap_servers=["localhost:9091", "localhost:9092"],
        group_id="def",
        consumer_config={
            "sasl.password": "PASSWORD",
        },
    )
    expected_repr = (
        "<class 'bluesky_kafka.consume.BasicConsumer'>("
        "topics=['abc'], "
        "consumer_config={"
        "'sasl.password': '****', "
        "'group.id': 'def', "
        "'bootstrap.servers': 'localhost:9091,localhost:9092'}"
        ")"
    )
    assert str(consumer) == expected_repr
| true
|
6d08480f34b83680861bd26a617e2cb3aa85b2ee
|
Python
|
kurtrm/predicting_equipment_failure
|
/notebooks/frag_tools.py
|
UTF-8
| 12,820
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
"""
Various functions and classes made while developing
pipelines and/or cleaning data.
"""
import json
from typing import List, Text, Callable
import yaml
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler, LabelBinarizer
import googlemaps
import pandas as pd
# ================== Transformer ==========================
class EquipmentScaler(BaseEstimator, TransformerMixin):
    """
    Standard-scale a chosen subset of columns of the equipment data set,
    leaving all other columns untouched.
    """

    def __init__(self, attr_names: List) -> None:
        """
        Remember which column headers should be scaled.
        """
        self.attr_names = attr_names

    def fit(self, X: pd.core.frame.DataFrame) -> 'EquipmentScaler':
        """
        No-op fit; present so fit_transform works.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Return a copy of X with the configured columns standard-scaled.
        """
        frame = X.copy()
        frame[self.attr_names] = StandardScaler().fit_transform(
            frame[self.attr_names].values)
        return frame
class TargetBinarizer(BaseEstimator, TransformerMixin):
    """
    Label-binarize the target column of the equipment data set.
    """

    def __init__(self, target_name: Text) -> None:
        """
        Remember which column holds the target labels.
        """
        self.target_name = target_name

    def fit(self, X: pd.core.frame.DataFrame) -> 'TargetBinarizer':
        """
        No-op fit; present so fit_transform works.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Return a copy of X with the target column label-binarized.
        """
        frame = X.copy()
        frame[self.target_name] = LabelBinarizer().fit_transform(
            frame[self.target_name].values)
        return frame
class NameChanger(BaseEstimator, TransformerMixin):
    """
    Replace the dataframe's column headers with more readable names.
    """

    def __init__(self, column_names: List=None) -> None:
        """
        Store the replacement headers; when none are given, fall back to
        the default header set for the equipment data.
        """
        if column_names is not None:
            self.column_names = column_names
        else:
            measures = ['Measure%d' % i for i in range(1, 16)]
            self.column_names = (['date', 'temp', 'humidity', 'Operator']
                                 + measures
                                 + ['hours_since_prev_fail', 'failure',
                                    'year', 'month', 'day-of-month',
                                    'day-of-week', 'hour', 'minute',
                                    'second'])

    def fit(self, X: pd.core.frame.DataFrame) -> 'NameChanger':
        """
        No-op fit; present so fit_transform works.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Return a copy of X whose columns carry the configured headers.
        """
        renamed = X.copy()
        renamed.columns = self.column_names
        return renamed
class BackupMakeDummies(BaseEstimator, TransformerMixin):
    """
    One-hot encode categorical columns and rename the resulting dummy
    columns to friendlier names for the 'day-of-week' and 'Operator'
    features.

    Fix: the `fit` return annotation referenced a nonexistent class name
    ('MakeDummies'); it now names this class, so resolving the forward
    reference (e.g. via typing.get_type_hints) no longer fails.
    """

    def __init__(self, attr_names: List) -> None:
        """
        Takes a list of attr_names to one-hot encode.
        Assumes the user has done preliminary data exploration.
        """
        self.attr_names = attr_names
        # Ordered weekday names used to rename the 'day-of-week_1'..'_7'
        # dummy columns produced by pd.get_dummies.
        self._daysofweek = ['Monday',
                            'Tuesday',
                            'Wednesday',
                            'Thursday',
                            'Friday', 'Saturday', 'Sunday']

    def fit(self, X: pd.core.frame.DataFrame) -> 'BackupMakeDummies':
        """
        No-op fit; made available for fit_transform.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Transform the selected columns into separate binary columns,
        drop the originals, and concatenate them to the original dataframe.
        """
        X_copy = X.copy()
        dummies = pd.get_dummies(X_copy, columns=self.attr_names)
        if 'day-of-week' in self.attr_names:
            # get_dummies emits 'day-of-week_1'..'day-of-week_7'; map to names.
            dummies = dummies.rename(columns={f'day-of-week_{i}': day
                                              for i, day in enumerate(self._daysofweek, 1)})
        if 'Operator' in self.attr_names:
            # Collapse 'Operator_Operator3' back to 'Operator3', etc.
            dummies = dummies.rename(columns={f'Operator_Operator{i}': f'Operator{i}'
                                              for i in range(1, 9)})
        return dummies
class DropColumns(BaseEstimator, TransformerMixin):
    """
    Drop the configured columns from the transformed dataframe.
    """

    def __init__(self, column_names: List) -> None:
        """
        Remember which column headers to drop.
        """
        self.column_names = column_names

    def fit(self, X: pd.core.frame.DataFrame) -> 'DropColumns':
        """
        No-op fit; present so fit_transform works.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Return a copy of X without the configured columns.
        """
        return X.copy().drop(self.column_names, axis=1)
class AddressLatLong(BaseEstimator, TransformerMixin):
    """
    Transformer to turn all of the current lat/longs
    to their actual lat/longs.

    Reads previously geocoded Google Maps API responses from two JSON
    files on disk and overwrites the dataframe's 'Latitude'/'Longitude'
    columns with them. NOTE(review): the file paths are hard-coded to a
    developer machine; assumes the JSON row order matches X's row order
    — TODO confirm against the geocoding step.
    """
    def fit(self, X: pd.core.frame.DataFrame) -> 'AddressLatLong':
        """
        Made available for fit_transform.
        """
        return self
    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Extract json from file and replace the
        latitude and longitude columns in the dataframe.
        """
        X_copy = X.copy()
        # Full geocoding dump for every address.
        path_all = '/mnt/c/Users/kurtrm/' \
                   'projects/predicting_equipment_failure/' \
                   'src/static/data/geocoded_address.json'
        # Re-geocoded responses for the rows listed in `bad_addresses`.
        path_corrected = '/mnt/c/Users/kurtrm/' \
                         'projects/predicting_equipment_failure/' \
                         'src/static/data/corrected_addresses.json'
        with open(path_all, 'r') as f:
            geocoded_all = json.load(f)
        with open(path_corrected, 'r') as f:
            geocoded_corrected = json.load(f)
        # Each entry is a list of candidate matches; take the first one.
        lat_longs = pd.DataFrame([location[0]['geometry']['location']
                                  for location in geocoded_all])
        X_copy[['Latitude', 'Longitude']] = lat_longs
        # Below, these addresses are hard coded corrections to lat_longs
        # Indices of bad addresses
        bad_addresses = [12, 15, 47, 107, 218, 227, 254, 381, 383, 386, 396,
                         423, 518, 521, 562, 570, 592, 656, 700, 727, 805, 969,
                         1038, 1092, 1121, 1207, 1251, 1273, 1360, 1384, 1387,
                         1403, 1424, 1462, 1464, 1671]
        corrected = [location['geometry']['location']
                     for location in geocoded_corrected]
        # Overwrite the bad rows pairwise; assumes `corrected` is in the
        # same order as `bad_addresses` — TODO confirm.
        for bad_address, correction in zip(bad_addresses, corrected):
            X_copy.at[bad_address, ['Latitude', 'Longitude']] = correction['lat'], correction['lng']
        return X_copy
class CleanAddresses(BaseEstimator, TransformerMixin):
    """
    Take the addresses from the raw dataframe and combine them.

    NOTE(review): despite the return annotation, transform returns a
    pandas Series of joined address strings, not a DataFrame; the extra
    `geocode` parameter also makes the transform signature nonstandard
    for sklearn pipelines — confirm callers use it directly.
    """
    def fit(self, X: pd.core.frame.DataFrame) -> 'CleanAddresses':
        """
        Made available for fit_transform.
        """
        return self
    def transform(self, X: pd.core.frame.DataFrame, geocode: bool=False) -> pd.core.frame.DataFrame:
        """
        Combine the address columns into one comma-separated string per
        row; when `geocode` is True, also send the strings to the Google
        geocoding helper and dump the responses to disk.
        """
        X_copy = X.copy()
        location_info = X_copy[['AssetLocation',
                                'AssetCity',
                                'AssetState',
                                'AssetZip']]
        # Row-wise join: "location, city, state, zip".
        joined_series = location_info.apply(lambda x: ", ".join(x.tolist()),
                                            axis=1)
        if geocode:
            # `geocode` doubles as the helper's to_file flag.
            geocode_data(joined_series.tolist(), to_file=geocode)
        return joined_series
class Binarize(BaseEstimator, TransformerMixin):
    """
    Map yes/no string columns to 1/0 integers: a cell becomes 1 when it
    contains the letter 'Y', else 0.
    """

    def __init__(self, attr_names: List) -> None:
        """
        Store the names of the columns to binarize.
        """
        self.attr_names = attr_names

    def fit(self, X: pd.core.frame.DataFrame) -> 'Binarize':
        """
        No-op fit; made available for fit_transform.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Return a copy of X with the configured columns binarized.

        Fix: the original read from a hard-coded column list ('VegMgmt',
        'PMLate', 'WaterExposure', 'MultipleConnects', 'Storm') while
        writing to `self.attr_names`, which breaks whenever the two
        differ; the configured `attr_names` are now used on both sides.
        """
        X_copy = X.copy()
        X_copy[self.attr_names] = X_copy[self.attr_names].applymap(
            lambda cell: 1 if 'Y' in cell else 0)
        return X_copy
class CurrentMakeDummies(BaseEstimator, TransformerMixin):
    """
    One-hot encode the configured categorical columns, replacing each
    with its dummy columns in the returned dataframe.
    """

    def __init__(self, attr_names: List) -> None:
        """
        Remember the categorical column names to encode.
        Assumes the user has done preliminary data exploration.
        """
        self.attr_names = attr_names

    def fit(self, X: pd.core.frame.DataFrame) -> 'CurrentMakeDummies':
        """
        No-op fit; present so fit_transform works.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Return X with the configured columns replaced by binary dummies.
        """
        return pd.get_dummies(X.copy(), columns=self.attr_names)
class ChangeTypes(BaseEstimator, TransformerMixin):
    """
    Apply a conversion callable to each configured column, pairwise.
    """

    def __init__(self, attr_names: List, funcs: List[Callable]) -> None:
        """
        Store the column names and the callables applied to them; the
        two lists are matched by position.
        """
        self.attr_names = attr_names
        self.funcs = funcs

    def fit(self, X: pd.core.frame.DataFrame) -> 'ChangeTypes':
        """
        No-op fit; present so fit_transform works.
        """
        return self

    def transform(self, X: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
        """
        Return a copy of X with each configured column passed element-wise
        through its paired callable.
        """
        converted = X.copy()
        for name, convert in zip(self.attr_names, self.funcs):
            converted[name] = converted[name].apply(convert)
        return converted
# ================ Functions =======================
def geocode_data(addresses: List, to_file: bool=False) -> List:
    """
    Geocode a list of address strings via the Google Maps geocoding API.

    Parameters
    ----------
    addresses : list of str address strings.
    to_file : when True, also dump the raw responses to the project's
        static data directory as JSON.

    Returns
    -------
    list with one geocoding response per address.

    Fix: the original ended with `return geocode_data`, handing back the
    function object itself instead of the collected responses.
    """
    with open('/home/kurtrm/.secrets/geocoding.yaml', 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the key file is local, but yaml.safe_load
        # would be preferable.
        key = yaml.load(f)
    gmaps = googlemaps.Client(key=key['API_KEY'])
    geocoded = [gmaps.geocode(address) for address in addresses]
    if to_file:
        path = '/mnt/c/Users/kurtrm/' \
               'projects/predicting_equipment_failure/' \
               'src/static/data/geocoded_address.json'
        with open(path, 'w') as f:
            json.dump(geocoded, f)
    return geocoded
def custom_zip_cleaning(zipcode) -> int:
    """
    Return the leading 5-digit zip code as an int, or 30189 when the
    value cannot be parsed.

    Fix: the original sliced `zipcode[:5]` directly, so an actual int
    (as the old `zipcode: int` annotation suggested) raised an uncaught
    TypeError; converting through str first accepts both str and int.
    """
    try:
        return int(str(zipcode)[:5])
    except ValueError:
        # Unparseable value: fall back to the default (Woodstock, GA) zip.
        return 30189
| true
|
bdeaf89956e9c11ee7ad098ea120920a0660e921
|
Python
|
harshitsharmaiitkanpur/cs251_exam
|
/CS 251/ASSIGNMENTS/assignment 3/160283/QN2.py
|
UTF-8
| 3,535
| 2.578125
| 3
|
[] |
no_license
|
# coding: utf-8
# NOTE: Jupyter-notebook export written in Python 2 (print statements).
# Fits a straight line y = w0 + w1*x to CSV training data three ways:
# the normal equation, stochastic gradient descent, and reports test RMSE.
# Bare expressions (e.g. `xx`, `w.shape`) are leftover notebook cell
# outputs and have no effect when run as a script.
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import os
# In[2]:
# Training data: column 0 = x, column 1 = y; path from first CLI argument.
data = pd.read_csv(sys.argv[1])
# In[3]:
data=np.array(data)
# In[4]:
x_train = data[:,0]
y_train = data[:,1]
# In[5]:
# Design matrix: bias column of ones plus the raw feature column.
xx= np.zeros((x_train.size,2))
# In[6]:
xx
# In[7]:
xx[:,1] = x_train
# In[8]:
xx
# In[9]:
xx[:,0] = 1
# In[10]:
xx
# In[11]:
X_train = xx
# In[12]:
print X_train
# In[13]:
# Random initial weights (2x1: intercept and slope).
w = np.random.rand(2,1)
print w
# In[14]:
w.shape
# In[15]:
y_train.shape
# In[16]:
plt.plot(x_train,y_train,'ro')
# In[17]:
plt.show()
# In[18]:
x1 = X_train.transpose()
# In[19]:
x1
# In[20]:
# Predictions from the random initial weights.
y=w.transpose().dot(x1)
# In[21]:
w.shape
# In[22]:
y.shape
# In[23]:
y1=np.array(y)
# In[24]:
y1.shape
# In[25]:
y1=y1.flatten('F')
# In[26]:
y1.shape
# In[27]:
x_train.shape
# In[28]:
y1.shape
# In[29]:
plt.plot(x_train,y_train,'ro')
plt.plot(x_train,y1)
# In[30]:
plt.show()
# In[31]:
X_train.shape
# In[32]:
X_train
# In[33]:
# Closed-form (normal equation) solution: w = (X^T X)^-1 X^T y.
w1=X_train.transpose().dot(X_train)
# In[34]:
w1
# In[35]:
w1.shape
# In[36]:
w_inv=np.linalg.inv(w1)
# In[37]:
w_inv
# In[38]:
w_inv.shape
# In[39]:
a=X_train.transpose().dot(y_train).reshape(2,1)
# In[40]:
a
# In[41]:
y_train
# In[42]:
a=a.reshape(2,1)
# In[43]:
w_direct=w_inv.dot(a)
# In[44]:
w_direct
# In[45]:
w_direct.shape
# In[46]:
plt.plot(x_train,y_train,'ro')
# In[47]:
plt.show()
# In[48]:
x1.shape
# In[49]:
x1
# In[50]:
y_axis=w_direct.transpose().dot(x1)
# In[51]:
y_axis.shape
# In[52]:
x_train.shape
# In[53]:
plt.plot(x_train,y_axis.T)
plt.plot(x_train,y_train)
plt.show()
# In[54]:
plt.show()
# In[55]:
w.shape
# In[56]:
# Re-initialize weights for stochastic gradient descent.
w = np.random.rand(2,1)
# In[57]:
w.shape
# In[58]:
y_train.shape
# In[59]:
# SGD: 2 epochs over the first 10000 rows, learning rate 1e-8;
# plots the current fit every 100 samples.
for i in range(1,3):
    for j in range(1,10001):
        x=data[j-1,0]
        y=data[j-1,1]
        x2=np.array([1,x])
        x_new=x2.reshape(2,1)
        #print w.shape
        w=w-0.00000001*(w.transpose().dot(x_new) - y)*(x_new)
        #print w.shape
        if j%100==0:
            ala=0
            plt.plot(x_train,y_train)
            y_ax=w.transpose().dot(X_train.T)
            #print w.shape
            plt.plot(x_train,y_ax.T)
            plt.show()
            #plt.scatter(x_train,y_train,s=100)
            #plt.plot(x_train,y_ax.T)
            #plt.show()
            #print w
# In[89]:
plt.plot(x_train,y_train)
plt.plot(x_train,y_ax.T)
plt.show()
# In[91]:
# Test data from the second CLI argument; hard-coded to 10500 rows below.
data2 = pd.read_csv(sys.argv[2])
# In[92]:
data2=np.array(data2)
# In[93]:
print data2
# In[94]:
x_test = data2[:,0]
y_test = data2[:,1]
# In[95]:
x_test
# In[96]:
y_test=y_test.reshape(10500,1)
# In[97]:
y_test.shape
# In[98]:
xx2 = np.zeros((x_test.size,2))
# In[99]:
xx2
# In[100]:
xx2[:,1] = x_test
# In[101]:
xx2
# In[102]:
xx2[:,0]=1
# In[103]:
xx2
# In[104]:
X_test=xx2
# In[105]:
X_test
# In[106]:
# Test RMSE of the SGD weights.
y_pred1=X_test.dot(w)
# In[107]:
y_pred1
# In[108]:
y_pred1.shape
# In[109]:
ans=np.sqrt(np.mean((y_pred1-y_test)**2))
# In[110]:
print ans
# In[111]:
# NOTE(review): this refits the normal equation on the TEST set, then
# evaluates on the same test set — presumably intentional for the
# assignment, but verify.
b=X_test.T.dot(y_test)
# In[158]:
w_direct=np.linalg.inv(X_test.T.dot(X_test)).dot(b)
w_direct
# In[112]:
y_pred2=X_test.dot(w_direct)
# In[113]:
y_pred2
# In[114]:
y_pred2=y_pred2.reshape(10500,1)
# In[115]:
y_pred2.shape
# In[116]:
y_test.shape
# In[117]:
ans1=np.sqrt(np.mean((y_pred2-y_test)**2))
# In[118]:
print ans1 + 40
| true
|
bfecd78ab8b66554c766d362768129d2c41d8512
|
Python
|
Rifleman354/Python
|
/Python Crash Course/Chapter 9 Exercises/techpriestDatabase4.py
|
UTF-8
| 2,469
| 3.296875
| 3
|
[] |
no_license
|
class TP_Database():
    '''Tech priest database class'''

    def __init__(self, name, rank):
        '''Store the priest's name and rank; the login counter starts at zero.'''
        self.name = name
        self.rank = rank
        self.login_attempts = 0

    def describe_TP(self, *extraDesc):
        '''Summarizes the information about the tech priest'''
        header = self.rank.title() + ' ' + self.name.title()
        print(header + ' has the following information in the database:')
        for item in extraDesc:
            print('- ' + item)

    def greet_TP(self):
        '''Greets the tech priest'''
        print('\nHello ' + self.rank.title() + ' ' + self.name.title() + '!')

    def increment_login_attempts(self, attempts):
        '''Add *attempts* to the running login counter.'''
        self.login_attempts = self.login_attempts + attempts

    def reset_login_attempts(self):
        '''Resets the login attempts to zero'''
        self.login_attempts = 0

    def read_login_attempts(self):
        '''Reads the attempts the user has logged in'''
        print('The current user has logged in with ' + str(self.login_attempts) + ' times.')
class Database_Admin_Login(TP_Database):
    '''Class that gives admin priviledges to the developer'''

    def __init__(self, name, rank):
        '''Initialize the parent data plus an empty priviledge list.'''
        super().__init__(name, rank)
        self.priviledges = []

    def show_priviledges(self):
        '''Prints the available commands the admin has'''
        # Reversed iteration reproduces the original pop()-from-the-end order.
        for command in reversed(["Can Add Post", "Can Delete Post", "Can Ban User"]):
            self.priviledges.append(command)
        print('\nThe following priviledges are yours to use in the database: ')
        for command in self.priviledges:
            print('- ' + command.title())
class Database_Priviledges_Login(TP_Database):
    '''Class that gives priviledges to moderators'''

    def __init__(self, name, rank):
        '''Initialize the parent data plus an empty priviledge list.'''
        super().__init__(name, rank)
        self.priviledges2 = []

    def show_priviledges2(self):
        '''Prints the available commands the moderators have'''
        # Reversed iteration reproduces the original pop()-from-the-end order.
        for command in reversed(["Can Add Post", "Can Delete Post"]):
            self.priviledges2.append(command)
        print('\nThe following priviledges are yours to use in the database: ')
        for command in self.priviledges2:
            print('- ' + command.title())
# Demo: log in a moderator-level user and display their priviledges.
Moderator = Database_Priviledges_Login("helios", "archmagos")
Moderator.show_priviledges2()
| true
|
563fb4e90b582da7d3945033d629108e68252d44
|
Python
|
gdogpwns/RespireBookScanner
|
/HaitiBookScanner.py
|
UTF-8
| 7,385
| 3.265625
| 3
|
[] |
no_license
|
# isbntools documentation found at https://isbntools.readthedocs.io/en/latest/info.html
# Using this too: https://stackoverflow.com/questions/26360699/how-can-i-get-the-author-and-title-by-knowing-the-isbn-using-google-book-api
import sys
import openpyxl
import datetime
from isbntools.app import *
# Service set for Google Books
service = "wcat"
# Main menu
def main():
    """Top-level menu: read a command and dispatch to the matching action."""
    print("Main Menu:")
    print("To register new books, type 'register'")
    print("To check in books, type 'check in'")
    print("To check out books, type 'check out'")
    print("To exit, type 'exit'")
    choice = input("")
    # Accepted spellings for each command (kept exactly as before).
    register_words = ["register", "Register", "REGISTER", "'register'"]
    check_in_words = ["check in", "checkin", "Check In", "Check in", "CHECKIN", "CHECK IN", "CheckIn", "'check in'"]
    check_out_words = ["check out", "checkout", "Check Out", "Check out", "CHECKOUT", "CHECK OUT", "CheckOut", "'check out'"]
    if choice in register_words:
        print("")
        register_book()
    elif choice in check_in_words:
        print("")
        check_in()
    elif choice in check_out_words:
        print("")
        check_out()
    elif choice in ["exit", "Exit", "EXIT"]:
        exit()
    else:
        print("The inputted value is not an option. Try again.")
        print("")
        main()
# Allows for registration of books into database.
def register_book():
    """Scan an ISBN and register it in BookDatabase.xlsx.

    If the ISBN is already present, bump its Total Quantity (col D) and
    In Stock (col E) counts; otherwise look up title/authors via the
    isbntools `meta` service and append a new row. Loops by calling
    itself after each scan (NOTE(review): recursion grows the stack over
    a long session).
    """
    inventory_workbook = openpyxl.load_workbook("BookDatabase.xlsx")
    book_inventory_sheet = inventory_workbook["Book Inventory"]
    book = input("Scan barcode to register book or type 'menu': ")
    if book == "menu":
        print("")
        main()
    else:
        # Column C holds the registered ISBNs.
        isbn_list = []
        for row in book_inventory_sheet["C"]:
            isbn_list.append(row.value)
        if book in isbn_list:
            # Already registered: increment both counters in place.
            cell_row = (isbn_list.index(book) + 1)
            total_quantity = book_inventory_sheet["D" + str(cell_row)]
            in_stock = book_inventory_sheet["E" + str(cell_row)]
            total_quantity.value = (total_quantity.value + 1)
            in_stock.value = (in_stock.value + 1)
            print("At least one of this book already registered. Total quantity is now: " + str(total_quantity.value))
            print("")
            inventory_workbook.save("BookDatabase.xlsx")
            register_book()
        else:
            # New book: fetch metadata from the configured ISBN service.
            meta_dict = meta(book, service)
            authors_list = meta_dict["Authors"]
            authors = ",".join(authors_list)
            title = meta_dict["Title"]
            # Appends the info to the last column, and sets "Total Quantity" and "In Stock" to 1
            book_inventory_sheet.append([title, authors, book, 1, 1])
            print (title + " by " + authors + " added to database.")
            print("")
            inventory_workbook.save("BookDatabase.xlsx")
            register_book()
# Allows the library to scan books in once returned.
def check_in():
    """Scan an ISBN, pick who is returning it, and check the book in.

    Removes the matching row from the "Check Out-In" sheet and bumps the
    In Stock count (col E) in the "Book Inventory" sheet.
    NOTE(review): `current_date` is computed but never used here, and the
    scan loop below is bounded by the inventory sheet's length while it
    reads rows from the history sheet — confirm the two sheets are meant
    to stay the same length.
    """
    time = datetime.datetime.now()
    current_date = time.strftime('%d-%m-%Y %H:%M:%S')
    inventory_workbook = openpyxl.load_workbook("BookDatabase.xlsx")
    book_history_sheet = inventory_workbook["Check Out-In"]
    book_inventory_sheet = inventory_workbook["Book Inventory"]
    book = input("Scan barcode to check in or type 'menu': ")
    print("")
    if book == "menu":
        main()
    else:
        inventory_isbn_list = []  # List of all ISBN numbers in Book Inventory sheet
        checked_out_list = []  # List of all ISBN numbers in Check In-Out sheet
        checked_out_list_raw = []
        revised_checked_out_list = []  # List of all books that match the scanned ISBN that are checked out
        for row in book_inventory_sheet["C"]:
            inventory_isbn_list.append(row.value)
        if book in inventory_isbn_list:
            # Walk the history sheet (rows start at 2, after the header)
            # collecting (name, isbn, row) for every outstanding checkout.
            i = 0
            while i <= (len(inventory_isbn_list) - 1):
                name = book_history_sheet["D" + str(i + 2)].value
                isbn = book_history_sheet["C" + str(i + 2)].value
                row_location = i + 2
                checked_out_list.append([name, isbn, row_location])
                checked_out_list_raw.append(isbn)
                if isbn == book:
                    revised_checked_out_list.append([name, row_location])
                i += 1
            if book in checked_out_list_raw:
                # Let the operator pick which borrower is returning a copy.
                print ("Select the number next to the name of who is checking the book in:")
                n = 0
                while n <= (len(revised_checked_out_list) - 1):
                    print(str(n + 1) + ": " + revised_checked_out_list[n][0])
                    n += 1
                print("")
                selected_number = int(input("Enter number next to name here: "))
                if selected_number <= n and selected_number > 0:
                    # Drop the checkout record and restore one unit of stock.
                    selected_person = revised_checked_out_list[selected_number - 1][1]
                    book_history_sheet.delete_rows(selected_person, 1)
                    cell_row = (inventory_isbn_list.index(book) + 1)
                    in_stock = book_inventory_sheet["E" + str(cell_row)]
                    in_stock.value = (in_stock.value + 1)
                    inventory_workbook.save("BookDatabase.xlsx")
                else:
                    print("")
                    print("Selected number is not an option. Please try again.")
                    print("")
                    check_in()
            else:
                print ("Book is not currently checked out.")
                check_in()
        elif book not in inventory_isbn_list:
            print("ERROR: This book was never registered. Its ISBN number is not in the database.")
            print("")
            main()
        inventory_workbook.save("BookDatabase.xlsx")
# Allows the library to scan books when checked out.
def check_out():
    """Scan an ISBN, record the borrower, and check the book out.

    Decrements In Stock (col E) in "Book Inventory" and appends
    [title, authors, isbn, borrower, timestamp] to "Check Out-In".
    Loops by calling itself after each scan.
    """
    time = datetime.datetime.now()
    current_date = time.strftime('%d-%m-%Y %H:%M:%S')
    inventory_workbook = openpyxl.load_workbook("BookDatabase.xlsx")
    book_history_sheet = inventory_workbook["Check Out-In"]
    book_inventory_sheet = inventory_workbook["Book Inventory"]
    book = input("Scan barcode to check out or type 'menu': ")
    if book == "menu":
        print("")
        main()
    else:
        # Column C holds the registered ISBNs.
        isbn_list = []
        for row in book_inventory_sheet["C"]:
            isbn_list.append(row.value)
        if book in isbn_list:
            cell_row = (isbn_list.index(book) + 1)
            in_stock = book_inventory_sheet["E" + str(cell_row)]
            # Re-fetch metadata from the ISBN service for the history row.
            meta_dict = meta(book, service)
            authors_list = meta_dict["Authors"]
            authors = ",".join(authors_list)
            title = meta_dict["Title"]
            if in_stock.value <= 0:
                print("ERROR: The database claims that there are 0 books left in stock. Did you mean to check in?")
                print("")
                check_out()
            else:
                checked_out_by = input("Enter the name of who is checking out the book: ")
                in_stock.value = (in_stock.value - 1)
                book_history_sheet.append([title, authors, book, checked_out_by, current_date])
                print(title + " successfully checked out to " + checked_out_by + ". Remaining copies of this book: " + str(in_stock.value))
                print("")
                inventory_workbook.save("BookDatabase.xlsx")
                check_out()
        else:
            print("ERROR: This book was never registered. Its ISBN number is not in the database.")
            check_out()
# Launch the interactive menu when the script is executed.
main()
| true
|
3e942e48fc2da2c8573f8160b298f4a474379457
|
Python
|
Aaaronchen/JS_Encrypt
|
/天气/test.py
|
UTF-8
| 3,931
| 3.078125
| 3
|
[] |
no_license
|
import execjs,time,json,base64
'''
sss0 = "你好siri,今天天气30摄氏度!...++/=1"
sss1 = "你好siri,今天天气30摄氏度!...++/=1"
sss2 = sss1.encode('utf-8')
print(sss2,type(sss2))
print(base64.encodestring(sss2))
print(base64.b64encode(sss2))
'''
from Crypto.Cipher import DES,DES3
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
class PrpCrypt(object):
    """AES-CBC helper with a fixed all-'0' IV and NUL-byte padding.

    NOTE(review): a constant IV and '\0' padding are cryptographically
    weak; fine for reverse-engineering/demo use, not for protecting data.
    """
    def __init__(self, key):
        # Key must be 16 (AES-128), 24 (AES-192) or 32 (AES-256) bytes
        # once UTF-8 encoded, or AES.new below raises.
        self.key = key.encode('utf-8')
        self.mode = AES.MODE_CBC
    # Encrypt function: if text is shorter than 16 bytes, pad it up to 16;
    # if longer and not a multiple of 16, pad up to the next multiple of 16.
    def encrypt(self, text):
        """Encrypt *text* (str) and return the ciphertext as hex bytes."""
        text = text.encode('utf-8')
        # Fixed IV of sixteen ASCII '0' bytes.
        cryptor = AES.new(self.key, self.mode, b'0000000000000000')
        # The key length here must be 16 (AES-128), 24 (AES-192),
        # or 32 (AES-256) bytes; AES-128 is sufficient for current use.
        length = 16
        count = len(text)
        if count < length:
            add = (length - count)
            # \0 backspace
            # text = text + ('\0' * add)
            text = text + ('\0' * add).encode('utf-8')
        elif count > length:
            add = (length - (count % length))
            # text = text + ('\0' * add)
            text = text + ('\0' * add).encode('utf-8')
        self.ciphertext = cryptor.encrypt(text)
        # AES output is raw binary that may not be printable/storable as-is,
        # so the ciphertext is uniformly converted to a hex string here.
        return b2a_hex(self.ciphertext)
    # After decrypting, the padding is stripped with rstrip().
    def decrypt(self, text):
        """Decrypt hex-encoded ciphertext and return the plaintext str."""
        cryptor = AES.new(self.key, self.mode, b'0000000000000000')
        plain_text = cryptor.decrypt(a2b_hex(text))
        # return plain_text.rstrip('\0')
        return bytes.decode(plain_text).rstrip('\0')
class DESUtil(object):
    """DES-CBC helper with a fixed 8-byte IV and base64-encoded output."""
    def __init__(self, key):
        # DES requires an 8-byte key once UTF-8 encoded.
        self.key = key.encode('utf-8')
        self.__BLOCK_SIZE_8 = self.BLOCK_SIZE_8 = DES.block_size
        self.IV = "9ff4453b".encode('utf-8')  # __IV = chr(0)*8
    # NOTE(review): method name 'encryt' (sic) is kept — callers use it.
    def encryt(self,text):
        """Encrypt *text* (str) and return base64-encoded ciphertext bytes."""
        text = text.encode('utf-8')
        cipher = DES.new(self.key, DES.MODE_CBC, self.IV)
        # Manual PKCS#5-style padding: pad with chr(n) repeated n times.
        x = self.__BLOCK_SIZE_8 - (len(text) % self.__BLOCK_SIZE_8)
        if x != 0:
            text = text + chr(x)*x
        msg = cipher.encrypt(text)
        # msg = base64.urlsafe_b64encode(msg).replace('=', '')
        msg = base64.b64encode(msg)
        return msg
    def decrypt(self,enStr):
        """Decrypt base64 ciphertext; returns a re-base64'd hex round-trip.

        NOTE(review): the result is hex-dumped, un-hexed and base64'd again
        rather than returned as plaintext — presumably for debugging a
        target site's encoding; confirm before relying on the output.
        """
        # enStr += (len(enStr) % 4)*"="
        # decryptByts = base64.urlsafe_b64decode(enStr)
        decryptByts = base64.b64decode(enStr)
        cipher = DES.new(self.key, DES.MODE_CBC,self.IV)
        msg = cipher.decrypt(decryptByts)
        b2a_hex_ciphertext = str(b2a_hex(msg), encoding = "utf-8")
        print(b2a_hex_ciphertext)
        result = base64_en(a2b_hex(b2a_hex_ciphertext))
        print(result)
        return result
def base64_en(sss):
    """Base64-encode *sss* (bytes-like) and return the result as a str."""
    return base64.b64encode(sss).decode('utf-8')
from pyDes import *
import base64
# DES in CBC mode with a custom IV (pyDes implementation).
def DesEncrypt(res,key,iv=b"9ff4453b"):
    """Encrypt *res* with DES-CBC under *key* and return base64 bytes.

    NOTE(review): Des_Key (key padded/truncated to 8 chars) is passed to
    the constructor but then overridden by setKey(key) with the raw key —
    so *key* must itself be 8 bytes or pyDes will reject it; confirm
    whether the padding was meant to take effect.
    """
    Des_Key = (key+"0000")[0:8]
    k = des(Des_Key, CBC, iv, pad=None, padmode=PAD_PKCS5)
    k.setKey(key)
    EncryptStr = k.encrypt(res)
    return base64.b64encode(EncryptStr)  # return as base64-encoded bytes
def DesDecrypt(res,key,iv=b"9ff4453b"):
    """Decrypt base64-encoded DES-CBC ciphertext *res* under *key*.

    Mirrors DesEncrypt, including the setKey(key) override of the padded
    Des_Key (see NOTE there). Returns the decrypted bytes.
    """
    Des_Key = (key+"0000")[0:8]
    EncryptStr = base64.b64decode(res)
    k = des(Des_Key, CBC, iv, pad=None, padmode=PAD_PKCS5)
    k.setKey(key)
    DecryptStr = k.decrypt(EncryptStr)
    #print(DecryptStr)
    return DecryptStr
if __name__ == '__main__':
res = "ef69hpSUper0YXGaGvI5GR/duQ1NyiF2R4zlJmTFqqWpDwHH8lJ4ruK1frqZYxHdME1eaaQFkKXed+350LvyGPVPFEp+F/zrdvdWITT91VSqAQE4E0fkOQws94cZ77vYcGzrql47ceIp4nTBuIWFZZpeVEQGteQGKW3XFeCkSS0uzgYcxc2nYLkcjJ45q+sIn6gIB2TIUJGJ4be28WtWdpa1+88+eLwVsTVsFtinpgYXnUm7IHaxkrEEzUp+6NhJC7sTwDl+yA4+owskkiOTRNUCk9FYEeLaBWLsNttMgbgO8Vqm6OZs2MpMsb5VzlYN1ekbEtTgKUksDaPEqOhAAippDSAtrgRi/HFUWJA8z7Ps2FNvVbl/khVOdbBPw4qfdlR2eKRfQ9nnPn8ihRbeiAhwWpZBiMAEL0uG+nGVuGUJfMtAZ11DHoS8+dKvlxjT9zKbALa4Cr1SJw/lb9MEFx7EJnT0L2DSeiUyvw9OPxevDHqPbPsbCgtjjQd8IWQ8E4bmhPVV3//gzwsfJZ/KJiFJfdezovYM99Jryjy2WWqCUURMVm0pMNAPNvSLGG2GOOt56Mg6aBDi3Horj7bifiQs8Pt7yEg1V0BfLD+HFTiOrLQRwalDxeDmyhW96kSAaQz1MQZocuhVJvGckWiSrYKmeBC3vrWGcBinfr2JgKPMBhuS7G3RsPGdcXWh1NNpVOTJjVwKIxRa0RHzmvIkzlc5HFy2monFDoMx+ILm17jKQqhuD7w6s8M10AZ7iwLNN1sHnBSFUvdyCdJGGJOBXnCtF/SnGCmcHFoKq29V1jgwKU0Z7+THbHz1rJtQEiLmeO4AKMCmMjH5wiIFqeCgxBW4BzevRJYnyVSD1GtpCKzCV6yjIbeyQQ5X5JaTOM1H+XpmY5iKNb8u6y3QBxXo+eAGuhc29/+w4Y0tqIRspSr14/op2bRe0emGqIMxm7XFllAagVDJzZSjTw+9BMfMxJXCl9jxXieXOC024PN2GPd2XJ/LOhxj9yY4PYBOscVx+M8txlMupyKuOIcFGx8RKgPVJkFsLZobkzDogQvJNa4OkPDzpG9NOlHjODO6Mm4Uc24JjQqleNb8OGdqO/K6SvnjpwhvNP2PpGEP6eV6Y75NZ1jjDVgckQ9Mf2URCGTyEY2UNNz9Q5d4JByUYOzDMulhPwWULAfAZ1PcmN6iJmZtWzSKnoRNkCl/bsakLWCNdOQkx5/spMkYbVqTyqYq1T4M2So9kUFL/ohk7EnCCQ7vmaexXz6z6Ih6Uy/6bMTBvzd3gCxU9lozL2fhh1sDQ6iD+iqS5pGgEIpfV8STVytA5SSrirAblcw8O0YRTZ2sam+vRPQdd0Debbz5/y1UvzUEG3XqhhuYgCTK1hgppvxR1Mo2aVLTaHvEcyVFNp3zUbYygr2rshD9DKiZcVGnU/c4+WGwOaNPICgqUdpX8Ry9Ig89kz3srY0jyPZN0wwqQMl5mJWg0WY4XTDCh9ytf0gcNUOomdookPvtXKIkS/vLISmUXsrpDd4HU0jX7LhSeKvHgPYcPCYLSxUc1dL30QH94sD4ZB1iaaoqBFYaVachhvIYvl424+xyLAHsZRraphdxwv9JRLz/Ugzm8TNcXdMM8jTRI7WfStDrReK7QT949L76JKyAJ6HGzyYms+Mz5ZAjKcotTc1dTpdLhgi3v/ACf4nY1YS0DF4j1LiyxCx7TSakzBWmKx1SgctRmfgpSGn1toEJr6jqLC2RKA6zihloKwigJO9dmUDgZDjXha+jmdWh1aQNMKryQJ0AH0N0qAs38mWgjmbiNMmFIRVkYpqBYN/u/rs7OvRC2iRyHb+FJzhaC0App/122R3atHyOkBLQ+QDqnKIc/w5ecEHCZ5BsYwU85pyS6ilbcmqRCpwNFUS7gWCmTQEz/R+hkRnHIMEJJkOb0iRTu/MUZxo0vWWYVPClSGbJK1JIGhOaeRiyzLQrY7ui5gvTEpFl0uhJixosHyk0t
rNoYlaH5kidOuJxCoa74+jhyQ9UrbS4b7v2q/tujgKp/ODSSkaJuH7kXG6nBMBGZ+LoPmUkBEdZ6H8gC5ZffYlNPDwDzKqz3GJEMPhyRrxSScACDbqsZV+5sYYeIw2NgH/tKqgNxQWh/TkHenMuLLUB43jgMHD+QxEPyRAjmYJWdD9g5T3XnE9NnzfNSgXZ3BMS7ORJke52omZUQlMN2GEE9Lm6XF+rUzd8MGexS3CLoGMVrybWbZujcdZUQyZY4DVB4/FIXQlWovKfS66WLxwZ+wK7PadlkzC6a34J3W4dNlos8D+/SIn+DO5CeRaGGPDzz/l/148ke+xW+h7PfHBfGy7+BX3yyz8xB93XVzi6hWR0YopXOktozVer7QnVpGyDRE1eAp+GqWouviSA3Sw2DvjEfbZ7rPqZtKknHBKHOdx2sbRuQHrr8KR06u3IEPMnOSlUFC8KLKQMPeimuBVcTl2azkRjFivIIOAX6NMkwBl2htKT28nh4Soq0uY127mKPx0Hz0fxGNtI8Kz+RCn/D7ysJO/2ZGaPHlhcWB4S4PdMbkTXBcU57SNm01oYlLXKe0d2SNg5LhG/No5dHH1lVKmY+qIbemdyJC7ireA4KpkiQ4pUYCYoCEN+xk/NSwgjbi76WWVN5riQTwuAm3UVvSfR6idRGFCqpaTXZAUDs2U6C/VZg+XL62Ki1QqUg27NQ+WJu9ZfU5vHnoekpa/hyGygxh5V3LdzHN+kasSyZ9ODzZ8ecCPJlvo8Kp0G+KYt+WucWltBJzI13xfcEI9bCQVvQjmiC06CizrTqj8cbp+iyu7QF5bnTrzd5XqHo5epD7dxj3nhy44e+bULUliKY7+uN41vKMrI/Ttald1sf/H+MsSXq1qmA8wIH8aWCnJwW5xvq2xAABQD3V4L5rtBWo4bTGB2DRl4UjExm4WSIa3trygufcLTF/fXxrjzf0zi5wiRX0//DOR5dJbc3Xvzd4ZQzRZhfb1/VfaCRinJCpKj3UoK8YpN8Wp/xCG4edl91ckAfiGhXm32sMkvTd/sVC0/Nb2J0jQ1N4Ab/sloptuPGE2uz72d1zPpMAk6sZigzqUlW5ObCuM8PHhBxQnMnC2BRbVgue/aetpULijFuV/UpsSdcmjK4znOYDO46QWfvVnBdbIvUV2Vt1XI8usAKhCx/aYXh6BSe3ueoyZ7cn8XNNbbXB+5K9Kpc4MOWcjjEk9/kRu7Vk27GBoHmlvvNptBGdQJifaZFO0qE3+aibgN+tvNv8hs+f1i4rUOQi7IXEm8GbsDFffswZVfh9hUu8eY43ptReOC/3xs2DsgKsPRMcaiDtx8xI3DID8asi+yNBfvvma8QoPJB83NwuiPEQUDsp4ryQPdezK9aj8FlckeeLCNH4zrVHib7MvsgLwMsgbszrknK4TlgFCxSbJq3/D170gBxpDcuZYCpWMVr3S+OLwmY21EalHomK5c9FbMSe+SulrJSB6yuMh+6rIt2cTt378Km2xLZEbYOgYHuMFjV3ejGvD0nMbtsFpFXYwMTUhZYy9cUkx2HizUY/iofKCHULEYry2aYonAx9O3q5fyamU3VpJN1RisY5eB6FXM7dVWbedgNh+87/q4Tcjge+JOnjC8JSvJRnD85ehrt6xsj5Iah7hc6goOYETydcnMJaZk0jg6J8aYenuCBucp+cfSd5dlGwKdCBBj9cos5Ys1MaH5Yd9IdcSs1Y9sFabmmU/pbDw5wAACy+6dsrtU6GTWG+dU8/SNedJYPzykXtQyDD1h4mxg2NAhcCqSDkWjHq8LWso3uf8JesAdl/uidYMkjtY+fixXf8cpWNMqcsha+/jDFvKcNj9n5McCHOVcwjg3VIFxzuP2o90J66IFcNgPw3sGh3IYTlMpGBv8b1t3Gwz24IgXpIDLY0y6kiKO4wFUONMRBr2yxmPnylzBNG9Vg72plubrIamGqijBG4EAGB+F+jvJWZBfVfpNdcHa1dwtFaMdqcr+u
eUilUVugtoCKoLuPi9afOQB34h47rzGpCETtXojFU1TUSWn/dIi0529Wdhi+AfDlDJdZ224SSsFtw/CTUUKyfvxw8vOm+ffjr0rPEDEQ+eY/Jx9TqluDHk8LXW1ay8rpAkvET7wSIDJg3TJ8tDeRfflnr6G7zdE1B4eEZZqGS2E5EM6JNINKWYMM3dGAffCXQF4JnzRMqTFoBvDrcw5mER/I7gOu6QDFJISbrCpfK8i6yLtAtrHOc6qO4KR/9tSa3ky5a6N9k4gmqmn0hZ2IzNqjdztDLkRv4i3EHs5wcXGauukyucSkIg0inDfhWgw1TDlMlMW/hrkBG4lwEx9ikJh4Lo1W+42JYGgOn+vOl5Fiuy9linMTNv3Kw0RPKuoP8mfYOdAlNj3XiupibeQZJwwaz/nMzeR7/bl/QxxBM+XkVqIOInhD1aMxnZVoefALACgzyGqX5fh18NFr/lfBLP5hwHgmAeYizWlt3Alc1P4syNDTZ99BApZUuEAOiG/yjtsx65PFOpOwDqHnTLuQ6z/kVw/fVavaHjM2V0XuL+dSGhGz9mfV04YM3XWlU9/gEBr13hrltvCN4oJa9Dh9xlkWDXJ/2ldLlpEsqh7avALEHhfdDcsYJViI7kR86QMD+It9AuxZnVYsnNr/EMEmJ4C1CDaMjyPmW5K0rSLateQbX8wE3yMAy30fQGEJq1knkwBvXowvg7IiQAtwNom9W65niEL5Ug4ZsW+5kdHU9zvomIMLdweHl8Y1yhSPLQ7Ctjf8uncLgvfIlKqDUT6M6SQZRJTyMjyczUDxBuNVpeHeqfJ7SX1/ARsH3vx0OANRuV5uy/zmz9MQhyLvwB655vL/h1M8IHLVmb1SGqW7Utm8As9C1hFtU8aQ+4GmdSNVR1OqPIvkgmer/u574W87oX1sUGSZr9TYFCiroOAs4it3UBF8Hiw4mWbUP2bbIf38658WeMrotpXdvbD4TXJFlcYAL3/IMKjjpeA7KP8im5v5F5p1lp5EVF4flKzxeEv7cb7QlbvXhATBRrg1TuecowZm2iAPDt2VxAFDx9X56Tl0hicniQ6MwlE2eMqCo/+3r/8961qHHT1GWcqM01dRnfNth4PBSKLtU8jKUSQGgns8wmp2R2a3xNxNXivGKSsZgXvy/mWSa1xklBrMHV+aBY5381XIGISVF21zVUgyiCL+1AUoBJOcz+YRZ/cuegOxdp2b4vQtJHMYQCVcdcMjD2fTzYaqPeq2HgHCGcQMmS7VwZXJkRP/wgVvjGXl80OuKIWyUbESKYUwDfYiDkrHGdz3ybXH4BAQlmclhsEXko0PQd870d0vTs/O+o9/VfOrUiokZDmsBXish8jKCzBF/SdFtLoMOB+WSacuY3BYjRGzO3gMeB7crrAozbmj4Ltz7po0MgKZHHWekgNQqFWrQZ/G6G4wBmR5gElhiTg/+CrItOa901AS7lS5Bvt+d2EWQwxIoF/w1U2Zx/5/ql7iPWPDA6m6n+JD7lLiLq5XPVyC2etcjMrnvGl+zlgcJL9yTwNKpwoncy3RmctmCEE/11Td1+vmRdZrQKcdjm5Un3nL3AXWQ6kc8TWw93YgZ72Yr+OFdXfOb7vS9JlXYm1h5CtzbVnAZQGa8fvivpCt6WN7MjFMXaaOwMcDWqleyPxt1YDijLbctRRjx5eA14TtNUlhfhcOTC6OSBbzd0rQEDZxN8fizC+RVE8I9JxJHQ1Kh3yvRd5Xd4tW2hYtUjJpi0EIi8/qTrzsUhKuoJ+LDZMmbjc4vfD/SS+XQMHNB6TvLwm9/EVxMc2LWoNkQe2Uohw+/DzMYg7um7970vhVlkoOBQnM3A8jNgo7xvjOw=="
des_key = "863f30c7f96c96fb"
des_iv = b"9ff4453b"
result = DesDecrypt(res,des_key,des_iv)
EncryptStr = base64.b64decode(result).decode('utf-8')
print(EncryptStr)
| true
|
8f5bdb6cbd950b0f5e69781eda12a40d9d6f35db
|
Python
|
xiaochuan-cd/leetcode
|
/multiply.py
|
UTF-8
| 1,036
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
class Solution:
    def multiply(self, num1, num2):
        """Multiply two non-negative integers given as decimal strings.

        :type num1: str
        :type num2: str
        :rtype: str
        """
        # Grade-school multiplication: each pair of digits contributes to a
        # positional accumulator; a single carry pass then normalises it.
        digits = [0] * (len(num1) + len(num2))
        width = len(digits)
        for i, d1 in enumerate(reversed(num1)):
            for j, d2 in enumerate(reversed(num2)):
                # position i+j from the right maps to index width-1-i-j
                digits[width - 1 - i - j] += int(d1) * int(d2)
        carry = 0
        for k in range(width - 1, -1, -1):
            carry, digits[k] = divmod(digits[k] + carry, 10)
        # Strip leading zeros, but keep a single '0' for a zero product.
        text = ''.join(map(str, digits)).lstrip('0')
        return text or '0'
if __name__ == "__main__":
    # Ad-hoc smoke tests; only one case is enabled at a time.
    # print(Solution().multiply(
    #     '2322267896718392316129976729818262698599361122', '7348839706916210946024927859077721504476398931'))
    # print(Solution().multiply('0', '9133'))
    # print(Solution().multiply('1000000000', '1000000000'))
    # print(Solution().multiply('2', '0'))
    print(Solution().multiply('0', '0'))  # expected output: 0
    # print(Solution().multiply('123', '456'))
| true
|
4ce0891bf873eac480883808880ffac083810e0a
|
Python
|
LimSangSang/python_study
|
/chapter_04_02.py
|
UTF-8
| 3,368
| 4.03125
| 4
|
[] |
no_license
|
# Sequence types
# Container: holds mixed element types [list, tuple, collections.deque]
# Flat: holds a single element type [str, bytes, bytearray, array.array, memoryview]
# Mutable: list, bytearray, array.array, memoryview, deque
# Immutable: tuple, str, bytes
# Tuple Advanced
# Unpacking
# b, a = a, b (other languages need a temp variable to swap two values; Python assigns them directly)
print(divmod(100, 9)) # divmod returns the quotient and remainder of 100 / 9 as a tuple: (11, 1)
# print(divmod((100, 9))) TypeError: divmod expected 2 arguments, got 1
print(divmod(*(100, 9))) # the tuple must be unpacked into two separate arguments: (11, 1)
print(*(divmod(100, 9))) # 11 1 - the resulting tuple itself is unpacked
# x, y, rest = range(10) # ValueError: too many values to unpack (expected 3)
x, y, *rest = range(10)
print(x, y, rest) # 0 1 [2, 3, 4, 5, 6, 7, 8, 9]
x, y, *rest = range(2)
print(x, y, rest) # 0 1 []
x, y, *rest = 1, 2, 3, 4, 5
print(x, y, rest) # 1 2 [3, 4, 5]
# Mutable vs Immutable
l = (15, 20, 25) # tuple: immutable
m = [15, 20, 25] # list: mutable
# fresh objects were just bound to new names, so every id below differs
print(l, id(l)) # (15, 20, 25) 140361051709568
print(m, id(m)) # [15, 20, 25] 140361052258432
l = l * 2
m = m * 2
print(l, id(l)) # (15, 20, 25, 15, 20, 25) 140361051798496
print(m, id(m)) # [15, 20, 25, 15, 20, 25] 140361052258368
l *= 2
m *= 2
print(l, id(l))
# (15, 20, 25, 15, 20, 25, 15, 20, 25, 15, 20, 25) 140361015516176
# an immutable object cannot be modified in place, so *= rebinds l to a new id
print(m, id(m))
# [15, 20, 25, 15, 20, 25, 15, 20, 25, 15, 20, 25] 140361052258368
# a mutable object is extended in place, so m keeps its original id
# sort vs sorted
# reverse, key=len, key=str.Lower, key=func...
# sorted: returns a new sorted object (the original is left unmodified)
f_list = ['orange', 'apple', 'mango',
          'papaya', 'lemon', 'strawberry', 'coconut']
# sorted - ['apple', 'coconut', 'lemon', 'mango', 'orange', 'papaya', 'strawberry']
print('sorted - ', sorted(f_list))
# sorted - ['strawberry', 'papaya', 'orange', 'mango', 'lemon', 'coconut', 'apple']
print('sorted - ', sorted(f_list, reverse=True))
# sorted - ['apple', 'mango', 'lemon', 'orange', 'papaya', 'coconut', 'strawberry']
print('sorted - ', sorted(f_list, key=len)) # by word length
# sorted - ['papaya', 'orange', 'apple', 'lemon', 'mango', 'coconut', 'strawberry']
print('sorted - ', sorted(f_list, key=lambda x: x[-1])) # sort by each word's last letter
# print('sorted - ', sorted(f_list, key=lambda x: x[-1], reverse=True))
# print(f_list)
# sort: sorts the object in place
# check the return value (None) - sort() returns nothing
# sort - None ['apple', 'coconut', 'lemon', 'mango', 'orange', 'papaya', 'strawberry'] -> the original list is modified
print('sort - ', f_list.sort(), f_list)
# sort - None ['strawberry', 'papaya', 'orange', 'mango', 'lemon', 'coconut', 'apple']
print('sort - ', f_list.sort(reverse=True), f_list)
# sort - None ['papaya', 'orange', 'apple', 'lemon', 'mango', 'coconut', 'strawberry']
print('sort - ', f_list.sort(key=lambda x: x[-1]), f_list)
# sort - None ['strawberry', 'coconut', 'mango', 'lemon', 'orange', 'apple', 'papaya']
print('sort - ', f_list.sort(key=lambda x: x[-1], reverse=True), f_list)
| true
|