blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ebc4fdbad1d00fc9ad7fe0ac09ec4acc05ccf69c | Python | kolibril13/tricks_for_python | /m3_functions_iter_ad.py | UTF-8 | 268 | 3.984375 | 4 | [] | no_license | # list of vowels
vowels = ['a', 'e', 'i', 'o', 'u']
vowelsIter = iter(vowels)
# Advance the iterator once per vowel: prints 'a', 'e', 'i', 'o', 'u',
# one per line, leaving the iterator exhausted.
for _ in range(len(vowels)):
    print(next(vowelsIter))
def gen(n):
    '''
    Return the largest perfect number strictly smaller than n,
    or False when no perfect number lies below n.
    (The old docstring said "largest number smaller than n", which is
    trivially n-1; the perfect-number condition was the actual intent.)
    '''
    n=n-1 # only numbers strictly smaller than the input qualify
    # Count down until a perfect number is found or we hit zero.
    while perfect_number(n)==False and n>0:
        n=n-1
    if n==0 : return False
    return n
def perfect_number(p):
    '''
    Return True if p is a perfect number (equal to the sum of its
    proper divisors), otherwise False.

    BUGFIX: the original initialised the divisor sum to 1 and then
    compared p == s, which wrongly reported 1 as perfect (and made
    gen(2) return 1).  Numbers below 2 are now rejected up front.
    The divisor scan also runs only to sqrt(p) instead of p.
    '''
    if p < 2:
        return False
    s = 1  # 1 is a proper divisor of every p >= 2
    for i in range(2, int(p ** 0.5) + 1):
        if p % i == 0:
            s += i
            partner = p // i
            if partner != i:  # don't double-count a square root divisor
                s += partner
    return s == p
def run_ui():
    """Prompt for numbers forever; for each one print the largest
    perfect number below it, or a not-found message."""
    while True:
        entered = int(input("enter a number : "))
        result = gen(entered)
        if result == False:
            print('there is not a such number ')
        else:
            print(result)
def test():
    # Sanity checks: 6 is perfect, 5 and 8 are not.
    assert perfect_number(6)==True
    assert perfect_number(5)==False
    assert perfect_number(8)==False

# Run the self-checks, then start the interactive prompt loop.
test()
run_ui()
| true |
00a50fc82460029f0ffdbd587d19218c93fbf9a8 | Python | summer-vacation/AlgoExec | /tencent/linkedlist/mergeTwoLists.py | UTF-8 | 1,881 | 3.5 | 4 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
File Name: mergeTwoLists
Author : jing
Date: 2020/3/19
https://leetcode-cn.com/explore/interview/card/tencent/222/linked-list/910/
"""
from tencent.linkedlist.ListNode import ListNode
class Solution:
    """LeetCode 21 (merge two sorted linked lists) solved three ways:
    recursion threaded through a dummy result node, iteration with a
    dummy head, and direct recursion on the smaller head.
    """

    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Recursive merge; a dummy node collects the result."""
        result = ListNode(0)
        self.merge(l1, l2, result)
        return result.next

    def merge(self, l1, l2, result):
        """Append the merged remainder of l1/l2 after `result`.

        Recursion depth is O(len(l1) + len(l2)), so very long lists can
        exceed Python's recursion limit -- mergeTwoLists2 avoids that.
        """
        if l1 is None and l2 is None:
            return
        elif l1 is not None and l2 is None:
            result.next = l1
            return
        elif l1 is None and l2 is not None:
            result.next = l2
            return
        else:
            # Attach the smaller head, then recurse on what remains.
            if l1.val <= l2.val:
                result.next = l1
                self.merge(l1.next, l2, result.next)
            else:
                result.next = l2
                self.merge(l1, l2.next, result.next)

    def mergeTwoLists2(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Iterative merge using a dummy head (O(1) extra space)."""
        head = ListNode(0)
        cur = head
        cur1 = l1
        cur2 = l2
        while cur1 and cur2:
            if cur1.val <= cur2.val:
                cur.next = cur1
                cur1 = cur1.next
            else:
                cur.next = cur2
                cur2 = cur2.next
            cur = cur.next
        # At most one list still has nodes; append it whole.
        if cur1:
            cur.next = cur1
        if cur2:
            cur.next = cur2
        return head.next

    def mergeTwoLists3(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Classic recursive merge without a dummy node."""
        if l1 is None:
            return l2
        elif l2 is None:
            return l1
        elif l1.val < l2.val:
            l1.next = self.mergeTwoLists3(l1.next, l2)
            return l1
        else:
            l2.next = self.mergeTwoLists3(l2.next, l1)
            return l2
if __name__ == '__main__':
    # Demo run.  BUGFIX: the original called Solution().mergeTwoLists()
    # with no arguments, which always raised TypeError because both
    # list heads are required.  Build two short sorted lists instead
    # and print the merged values.
    # NOTE(review): assumes ListNode(value) is the constructor used by
    # tencent.linkedlist.ListNode -- confirm against that module.
    first = ListNode(1)
    first.next = ListNode(3)
    second = ListNode(2)
    second.next = ListNode(4)
    node = Solution().mergeTwoLists(first, second)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)
| true |
69f5dfd32dc895b87948b8398a61f929880e2aae | Python | vsalex/call-stats | /tests/test_models.py | UTF-8 | 2,238 | 2.796875 | 3 | [] | no_license | import unittest
import json
from app.models import DailyStatObj, Call, Duration
class DailyStatTestCase(unittest.TestCase):
    """Unit tests for app.models.DailyStatObj construction and the
    private _set_call_id validator."""

    def setUp(self):
        # Fresh object for every test method.
        self.dso = DailyStatObj()
    def test_init_obj_True(self):
        self.assertIsInstance(self.dso, DailyStatObj)
    def test_init_obj_False(self):
        # The constructor takes no positional arguments.
        with self.assertRaisesRegex(
                TypeError,
                "takes 1 positional argument but 2 were given"):
            DailyStatObj("Hello world!")
    def test_init_True(self):
        # Default state: empty error list, no entry time yet.
        self.assertIsInstance(self.dso.errors, list)
        self.assertIs(self.dso.entry_time, None)
    def test_set_call_id_true(self):
        # A call id matching the expected type is stored as-is.
        call_id = 1155
        self.dso._set_call_id(call_id, int)
        self.assertEqual(call_id, self.dso.call_id)
    def test_set_call_id_false(self):
        # A type mismatch makes _set_call_id store False instead.
        call_id = 1155
        self.dso._set_call_id(call_id, str)
        self.assertEqual(False, self.dso.call_id)
class CallTestCase(unittest.TestCase):
    """Unit tests for app.models.Call; field values come from a JSON
    fixture and are copied onto the test case as attributes."""

    def setUp(self):
        # Load data from fixtures; relies on the test runner's cwd being
        # the project root so the relative path resolves.
        with open("tests/fixtures/call/call1.json", 'r') as f:
            json_data = json.load(f)
        for k, v in json_data.items():
            setattr(self, k, v)
    def test_init_obj_True(self):
        self.call = Call(self.call_id, self.timestamp, self.source_number,
                         self.dest_number)
        self.assertIsInstance(self.call, Call)
    def test_init_obj_False(self):
        # All four constructor arguments are mandatory.
        with self.assertRaisesRegex(
                TypeError,
                "missing 4 required positional arguments: 'call_id', "
                "'timestamp', 'source_number', and 'dest_number'"):
            self.call = Call()
class DurationTestCase(unittest.TestCase):
    """Unit tests for app.models.Duration construction."""

    def setUp(self):
        self.call_id = "call_id"
        # Duration value in whatever unit the model expects (seconds,
        # presumably -- confirm against app.models).
        self.duration = 22
    def test_init_obj_True(self):
        # NOTE(review): rebinds self.duration from the int fixture to
        # the Duration instance; harmless since each test gets setUp().
        self.duration = Duration(self.call_id, self.duration)
        self.assertIsInstance(self.duration, Duration)
    def test_init_obj_False(self):
        # Both constructor arguments are mandatory.
        with self.assertRaisesRegex(
                TypeError,
                "missing 2 required positional arguments: 'call_id' and "
                "'duration'"):
            self.duration = Duration()
if __name__ == '__main__':
    # Allow running this file directly instead of via a test runner.
    unittest.main()
| true |
99c003f86cba30f155c6487765dabce042d1e614 | Python | Dark-Llama/text-based-rpg | /Character_Class.py | UTF-8 | 1,684 | 3.515625 | 4 | [] | no_license | import random
class Character:
    """Base RPG character.

    Combat stats (atk, dfs, spd, luck, hp, magic_atk) are assigned on
    the instance by the caller; __init__ intentionally sets nothing.
    """
    name = ""

    def __init__(self):
        pass

    def basic_attack(self, defender):
        """Perform a physical attack on defender.

        Hit chance grows with the attacker's speed advantage and a
        luck-based roll may turn the hit into a 1.5x critical.  Prints
        the outcome and subtracts the damage from defender.hp.
        """
        damage = self.atk - defender.dfs
        # Hit chance: base 75, shifted by the speed difference.
        spd_diff = self.spd - defender.spd
        hit_chance = 75 + spd_diff
        # NOTE(review): randrange(luck, hit_chance) raises ValueError if
        # luck >= hit_chance; stats are assumed to keep luck well below.
        hit_prob = random.randrange(self.luck, hit_chance)
        if hit_prob < 15:
            # Miss: no damage at all.
            print("{}'s attack missed!".format(self.name))
            return
        if damage <= 0:
            print("The attack has no effect.")
            damage = 0
        else:
            # Critical roll: luck raises the floor of the draw.
            crit = random.randrange(self.luck, 100, 5)
            if crit > 85:
                damage = int(damage * 1.5)
                print("Critical Hit!")
        defender.hp = defender.hp - damage
        print("{} did {} damage!".format(self.name, damage))

    def magic_attack(self, defender):
        """Perform a magic attack on defender (magic always hits).

        BUGFIX: the critical multiplier is now truncated with int()
        exactly like basic_attack; the original left it a float, so
        defender.hp silently became a float after the first magic crit.
        """
        damage = self.magic_atk - defender.dfs
        crit = random.randrange(self.luck, 100, 5)
        if damage <= 0:
            print("The attack has no effect.")
            damage = 0
        else:
            if crit > 85:
                damage = int(damage * 1.5)
                print("Critical Hit!")
        defender.hp = defender.hp - damage
        print("{} did {} damage!".format(self.name, damage))

    def defend(self):
        # Not implemented yet.
        pass
a4fe1593b9bb21a492b9efb61aefae2867d9aca4 | Python | john-clark/rust-oxide-umod | /old/plugins/other/StartupItems.py | UTF-8 | 4,511 | 2.578125 | 3 | [
"MIT"
] | permissive | # Note:
# I add an underscore at the biginning of the variable name for example: "_variable" to prevent
# conflicts with build-in variables from Oxide.
# Use to manage the player's inventory.
import ItemManager
# Use to get player's information.
import BasePlayer
# The plug-in name should be the same as the class name and file name.
class StartupItems:
    """Oxide (Rust game server) plugin: when a player respawns after
    death, replace their inventory with the item set configured for
    the last Oxide permission group they belong to.

    NOTE(review): written for Oxide's embedded Python 2 runtime -- it
    relies on injected globals (V, permission, self.Config) and uses a
    Python 2 print statement; it cannot run outside the game server.
    """
    # Always start with a constructor.
    def __init__(self):
        # Plug-in / developer metadata recommended by Oxide.
        self.Title = 'StartupItems'
        self.Description = 'Set default items when player respawn after dead.'
        self.Author = 'RedNinja1337'
        self.Version = V(1, 0, 5)
        self.Url = 'http://oxidemod.org/plugins/startupitems.1323/'
        self.ResourceId = 1323
    # Called by Oxide when no configuration file exists yet.
    def LoadDefaultConfig(self):
        # Seed a demo layout: group name -> tuple of item dicts, each
        # naming the item, the amount and the container (Wear/Belt/Main).
        self.Config['GroupItems'] = ({
            'admin':({'item_shortname':'attire.hide.boots', 'Amount':1, 'Container':'Wear'},
            {'item_shortname':'attire.hide.pants', 'Amount':1, 'Container':'Wear'},
            {'item_shortname':'rock', 'Amount':1, 'Container':'Belt'},
            {'item_shortname':'bow.hunting', 'Amount':1, 'Container':'Belt'},
            {'item_shortname':'arrow.hv', 'Amount':25, 'Container':'Main'},),
            'moderator':({},),
            'player':({},)
        })
    # Hook called from BasePlayer.Respawn when the player clicks the
    # "Respawn" button (dead -> alive only, not when waking up).
    # NOTE(review): the parameter shadows the module-level BasePlayer
    # import; inside this method the name is the player object.
    def OnPlayerRespawned(self, BasePlayer):
        # Bail out quietly unless groups are configured.
        if self.Config['GroupItems']:
            _GroupItems = self.Config['GroupItems']
            # Groups the player belongs to; every player is at least in
            # the default "player" group.
            _Group = permission.GetUserGroups(BasePlayer.userID.ToString())
            # Item list configured for the most recently added group.
            _SetGroup = _GroupItems.get(_Group[-1])
            # Check if the group exists in the config file.
            if _SetGroup:
                try: # KeyError if Container/item_shortname/Amount missing
                    if _SetGroup[0]['Container'] and _SetGroup[0]['item_shortname'] and _SetGroup[0]['Amount']:
                        inv = BasePlayer.inventory
                        # Clear the default respawn inventory first.
                        inv.Strip()
                        for item in _SetGroup:
                            # Route each configured item to its container.
                            if item['Container'].lower() == 'main':
                                inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerMain)
                            elif item['Container'].lower() == 'belt':
                                inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerBelt)
                            elif item['Container'].lower() == 'wear':
                                inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerWear)
                            else: return
                    else: print False
                # Missing keys leave the inventory untouched (stripped
                # items are already gone at this point, though).
                except KeyError: return
            else: return
        else: return
| true |
d44ea819a48abe67267c85d1fb00d8b298d3e199 | Python | sruthi-batchala/captain | /lists.py | UTF-8 | 305 | 3.421875 | 3 | [] | no_license | #test case1
# Test case 1: prompt for a count, then read that many values one per
# line, keeping only the strictly positive ones.
n = int(input('enter number'))
lst = []
for _ in range(n):
    value = int(input('enter the value'))
    if value > 0:
        lst.append(value)
print(lst)

# Test case 2: read a count, then all values on one whitespace-separated
# line, again keeping only the strictly positive ones.
n = int(input())
num = list(map(int, input().split()))
val = [item for item in num if item > 0]
print(val)
| true |
820c2f3407ba3de7af0993aaabb3f7e225b9a9ef | Python | Rodarc20/CC-BuscadorTextos | /reducer-none.py | UTF-8 | 300 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
"""reducer.py"""
from operator import itemgetter
import sys
import math
# Pass-through reducer: re-emit each "word<TAB>filename,tf" record from
# stdin unchanged (Hadoop-streaming style identity step).
dictionary = {}  # unused in this pass-through step
numfiles = 17    # unused in this pass-through step
for line in sys.stdin:
    word, info = line.strip().split('\t', 1)
    filename, tf = info.split(',', 1)
    print('%s\t%s,%s' % (word, filename, tf))
| true |
2f01b188b556084398d6d26915e18ecf68305bcb | Python | helunxing/algs | /leetcode/140.单词拆分-ii.py | UTF-8 | 647 | 2.984375 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=140 lang=python3
#
# [140] 单词拆分 II
#
class Solution:
    """Word Break II: return every sentence that can be formed from s
    using words from wordDict (words may be reused)."""

    def dfs(self, s):
        """Return all sentences for suffix s, memoised in self.d."""
        if s in self.d:
            return self.d[s]
        res = []
        if not s:
            # Empty suffix: exactly one (empty) sentence completes here.
            res.append('')
            return res
        for word in self.wD:
            if s.startswith(word):
                for tail in self.dfs(s[len(word):]):
                    # Join with a space unless the tail is empty.
                    res.append(word + (' ' if tail else '') + tail)
        self.d[s] = res
        return res

    def wordBreak(self, s: str, wordDict: 'List[str]') -> 'List[str]':
        # BUGFIX: 'List' is quoted -- the original evaluated the bare
        # name List at class-definition time and raised NameError
        # because typing.List is never imported in this file.
        self.d, self.wD = {}, wordDict
        return self.dfs(s)
| true |
731e6f6b377ce31158fe4b81d695878f962dea19 | Python | vigi4cure/vigi4cure.github.io | /strava_explore_segments.py | UTF-8 | 1,239 | 2.65625 | 3 | [] | no_license | #!/usr/bin/python3
import time
import numpy as np
from stravalib.client import Client
# SECURITY(review): hard-coded Strava access token committed to source;
# it should come from an environment variable and be revoked/rotated.
client = Client(access_token='99c2994556a29905b96eb4197996854041ca47ca')
# bounds = (45.380184 , -74.023017, 45.719182 , -73.436622)
# Output files: discovered segment ids, and tiles whose query failed.
flist = open('slist.txt', 'w')
ferror = open('serror.txt', 'w')
# for x in np.arange(45.28,45.71,0.05):
#     for y in np.arange(-74.12, -73.43, 0.07):
#         bounds = (x, y, x+0.075, y+0.105)
# Sweep the bounding box in overlapping 0.045 x 0.075 degree tiles
# (step 0.03 / 0.05) and record every explored segment id.
for x in np.arange(45.380184,45.719182-0.03,0.03):
    for y in np.arange(-74.023017, -73.436622-0.05, 0.05):
        bounds = (x, y, x+0.045, y+0.075)
        # activityType = activityType_example # String | Desired activity type. (optional)
        # minCat = 56 # Integer | The minimum climbing category. (optional)
        # maxCat = 56 # Integer | The maximum climbing category. (optional)
        try:
            segments = client.explore_segments(bounds)
            print(len(segments))
            for segment in segments:
                flist.write(str(segment.id) + '\n')
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # failed tiles are only logged to serror.txt, never retried.
            print('%.6f,%.6f,%.6f,%.6f\n' % (x, y, x+0.045, y+0.075))
            ferror.write('%.6f,%.6f,%.6f,%.6f\n' % (x, y, x+0.045, y+0.075))
            # pass
        # Throttle to stay under the Strava API rate limit.
        time.sleep(1.5)
flist.close()
ferror.close()
| true |
5aebaec848691df94de03cabc4202f42d9e507db | Python | cjh0613/language-blocker-bot | /bot.py | UTF-8 | 2,860 | 2.90625 | 3 | [] | no_license | from os import environ
from sys import argv
from telegram.ext import (
Updater,
MessageHandler,
CommandHandler,
Filters
)
from telegram import Bot
from threading import Timer
# Code-point ranges of the ASCII letters: 97-122 = 'a'-'z', 65-90 = 'A'-'Z'.
RANGES = range(97, 123), range(65, 91)
def valid_message(message: str) -> bool:
    """Return True when every alphabetic character in message is a
    plain ASCII letter (a-z / A-Z); used to enforce English-only chat.

    Digits, punctuation and whitespace never invalidate a message.
    str.isascii() (3.7+) replaces the manual ord()-in-RANGES scan: for
    an isalpha() character, isascii() is exactly the a-z/A-Z test.
    """
    for character in message:
        if character.isalpha() and not character.isascii():
            print(f'invalid character {character}')
            return False
    return True
def message_handler(update, _):
    """Delete any non-English group message (unless the filter is
    temporarily disabled) and post a warning that self-destructs
    after 3 seconds."""
    message_text = update.message.text
    user_name = update.message.from_user.first_name
    if not valid_message(message_text) and not TEMPORARILY_ALLOWED:
        my_message = update.message.reply_text('{}, Please write in english only dude!'.format(user_name))
        # Remove the offending message, then schedule deletion of our
        # own warning.  NOTE(review): `bot` and TEMPORARILY_ALLOWED are
        # created in the __main__ block, so this handler raises
        # NameError if the module is merely imported.
        bot.delete_message(chat_id=update.message.chat.id, message_id=update.message.message_id)
        timer = Timer(3, bot.delete_message, kwargs={"chat_id": my_message.chat.id, "message_id": my_message.message_id})
        timer.start()
def temporarily_allow_handler(update, _):
    """/allow command: let the admin temporarily switch the language
    filter off for everybody."""
    global TEMPORARILY_ALLOWED
    # NOTE(review): compares the *chat* id against ADMIN_ID; this only
    # equals the admin's user id in a private chat -- confirm intent.
    user_id = update.message.chat.id
    if user_id == ADMIN_ID:
        TEMPORARILY_ALLOWED = True
        update.message.reply_text("Temprarily allowed!")
def disallow_handler(update, _):
    """/disallow command: re-enable the language filter (admin only)."""
    global TEMPORARILY_ALLOWED
    # NOTE(review): same chat-id-vs-user-id caveat as the /allow handler.
    user_id = update.message.chat.id
    if user_id == ADMIN_ID:
        TEMPORARILY_ALLOWED = False
        update.message.reply_text("Temprarily allowed disabled!")
if __name__ == '__main__':
    # Global flag toggled by the /allow and /disallow handlers.
    TEMPORARILY_ALLOWED = False
    # Credentials come from the environment; bail out with usage help
    # when either variable is missing.
    try:
        BOT_TOKEN = environ['BOT_TOKEN']
        ADMIN_ID = int(environ['ADMIN_ID'])
    except KeyError:
        print('No env bot token have been provieded.')
        print('Usage: export BOT_TOKEN=<BOT_TOKEN> ADMIN_ID=<ADMIN USER ID>')
        print('python3 {}'.format(argv[0]))
        exit(1)
    """Start the bot."""
    # Create the Updater and pass it your bot's token.
    # use_context=True selects the context-based callback API (default
    # after python-telegram-bot v12).
    updater = Updater(BOT_TOKEN, use_context=True)
    # Separate Bot instance used by the handlers to delete messages.
    bot = Bot(BOT_TOKEN)
    # Get the dispatcher to register handlers.
    dp = updater.dispatcher
    # Route every non-command message through the language filter, and
    # register the two admin commands.
    dp.add_handler(MessageHandler(filters=~Filters.command, callback=message_handler))
    dp.add_handler(CommandHandler("allow", callback=temporarily_allow_handler))
    dp.add_handler(CommandHandler("disallow", callback=disallow_handler))
    # Start polling and block until interrupted.
    print('Bot running')
    updater.start_polling()
    updater.idle()
| true |
e3d51334f1fa7c85f775036eea8ce7d230c905ac | Python | prawn-cake/hashcode2018 | /task/solution.py | UTF-8 | 7,513 | 2.8125 | 3 | [] | no_license | import time
from task import parse_input, parse_output, helpers
from collections import namedtuple
# A ride request: endpoints, allowed time window and precomputed distance.
Ride = namedtuple('Ride', ['id', 'coord_start', 'coord_finish', 'start_t', 'finish_t', 'dist'])
# A ride assigned to a car, with the times the car actually starts/ends it.
Order = namedtuple('Order', ['ride', 'actual_start_t', 'actual_end_t'])
class Car:
    """A vehicle: `id` identifies it; `t` is the time at which it
    becomes free after serving its currently assigned rides."""

    def __init__(self, idx, t=0):
        self.id = idx
        self.t = t

    def __repr__(self):
        return f'Car({self.id}, {self.t})'
def pick_car(ride, car_list, schedule, radar, avg_distance_per_car, reserved_cars, use_balancer=True):
    """Greedily pick the cheapest car that can finish `ride` on time.

    Cost = travel to the pickup point + wait for the start window +
    the ride itself.  With use_balancer=True, cars already carrying
    ~70% of the average workload are pushed into `reserved_cars`
    instead of being considered.

    Returns (car, actual_start_t, actual_end_t, cost, reserved_cars);
    car is None (with None times/cost) when no car qualifies.

    BUGFIX: the original kept overwriting actual_start_t/actual_end_t
    on every candidate, so the values returned belonged to whichever
    car happened to be evaluated *last*, not to the picked car -- the
    caller could then schedule the chosen car with another car's
    timings.  The chosen car's times are now tracked explicitly.
    """
    picked_car, min_cost = None, None
    best_start_t, best_end_t = None, None
    for car in car_list:
        # Load balancer: park over-worked cars in the reserve pool.
        if use_balancer:
            if sum(o.ride.dist for o in schedule[car]) >= 0.7 * avg_distance_per_car:
                reserved_cars.append(car)
                continue
        if not is_car_available(car, ride, schedule):
            continue
        time_to_reach = helpers.distance(radar[car.id], ride.coord_start)
        time_to_wait = max(0, ride.start_t - car.t - time_to_reach)
        ride_distance = helpers.distance(ride.coord_start, ride.coord_finish)
        actual_start_t = car.t + time_to_reach + time_to_wait
        actual_end_t = actual_start_t + ride_distance
        if actual_end_t >= ride.finish_t:
            # This car can't make it before the deadline.
            continue
        cost = time_to_reach + time_to_wait + ride_distance
        if min_cost is None or cost < min_cost:
            picked_car, min_cost = car, cost
            best_start_t, best_end_t = actual_start_t, actual_end_t
    return picked_car, best_start_t, best_end_t, min_cost, reserved_cars
def prio_by_start_t(rides_list):
    """Default priority: order rides by their earliest start time."""
    return sorted(rides_list, key=lambda ride: ride.start_t)
def prio_by_start_t_and_dist(rides_list):
    # Order by start time, breaking ties by how far the pickup point
    # lies from the origin (0, 0).
    return sorted(rides_list, key=lambda r: (r.start_t, helpers.distance((0, 0), r.coord_start)))
def prio_by_distance(rides_list):
    """Order rides from shortest to longest ride distance."""
    return sorted(rides_list, key=lambda ride: ride.dist)
def prio_by_finish_t(rides_list):
    """Order rides by their latest allowed finish time (deadline)."""
    return sorted(rides_list, key=lambda ride: ride.finish_t)
def main(meta, rides, prioritise_rides=prio_by_start_t):
    """Greedy scheduler for the HashCode self-driving-rides problem:
    assign each ride (in heuristic priority order) to the cheapest
    available car and return the schedule as {car: [Order, ...]}.

    Also prints the skip rate and an estimated score.
    NOTE(review): divides by total_distance and vehicles, so an empty
    or all-zero problem would raise ZeroDivisionError.
    """
    rows, columns, vehicles, num_of_rides, bonus, T = meta
    print('rows: %s, columns: %s, vehicles: %s, num_of_rides: %s, bonus: %s, T: %s' % (rows, columns, vehicles, num_of_rides, bonus, T))
    # Wrap raw parsed rides into Ride tuples with a precomputed distance.
    rides_list = [Ride(i, item[0], item[1], item[2][0], item[2][1], helpers.distance(item[0], item[1]))
                  for i, item in enumerate(rides)]
    # for ride in rides_list:
    #     print(ride)
    total_distance = sum([ride.dist for ride in rides_list])
    # Fraction of total ride distance that cannot possibly be served by
    # all vehicles together within T steps.
    min_rate_to_skip = 1 - ((T * vehicles) / total_distance)
    print('Min accepted rate of skipped rides: {:.2f} %'.format(min_rate_to_skip * 100))
    cars_list = [Car(i, 0) for i in range(vehicles)]
    schedule = {car: [] for car in cars_list}
    # Last known position of every car (all start at the origin).
    radar = {car.id: (0, 0) for car in cars_list}
    # TODO: this could be reordered
    priority_queue = prioritise_rides(rides_list)
    avg_distance_per_car = total_distance / vehicles
    total_score = 0
    total_ride_cost = 0
    total_bonus = 0
    skipped_rides = 0
    for ride in priority_queue:
        # if total_score > T:
        #     break
        reserved_cars = []
        # First pass respects the load balancer ...
        picked_car, actual_start_t, actual_end_t, min_cost, reserved_cars = pick_car(
            ride, cars_list, schedule, radar, avg_distance_per_car, reserved_cars)
        if picked_car is None:
            # ... second pass may use the cars the balancer held back.
            if reserved_cars:
                picked_car, actual_start_t, actual_end_t, min_cost, reserved_cars = pick_car(
                    ride, cars_list, schedule, radar, avg_distance_per_car, reserved_cars, use_balancer=False)
                if picked_car is None:
                    skipped_rides += 1
                    continue
            else:
                skipped_rides += 1
                continue
        # Record the assignment and advance the car's clock/position.
        total_score += ride_score(total_ride_cost, picked_car, ride, radar, bonus)
        schedule[picked_car].append(Order(ride, actual_start_t, actual_end_t))
        radar[picked_car.id] = ride.coord_finish
        total_ride_cost += min_cost
        picked_car.t += min_cost
        if actual_start_t == ride.start_t:
            # On-time pickup earns the per-ride bonus.
            total_bonus += bonus
    print('skipped: {} ({} %)'.format(skipped_rides, (skipped_rides / num_of_rides) * 100))
    print('total score: %d (score: %d, bonus: %d)' % (total_score + total_bonus, total_score, total_bonus))
    return schedule
def is_car_busy(car, schedule, current_t, cost):
    """Return True when the gap before the car's last scheduled order
    starts is larger than a prospective job of duration `cost`.

    BUGFIX: the original read last_order.start_t, but Order only has
    the fields (ride, actual_start_t, actual_end_t), so every call with
    a non-empty schedule raised AttributeError.  actual_start_t is the
    field the comparison clearly intended.
    NOTE(review): this helper appears unused by main(); kept for API
    compatibility.
    """
    orders = schedule[car]
    if not orders:
        return False
    last_order = orders[-1]
    # "We have some room to move" -- enough slack before the order.
    return (last_order.actual_start_t - current_t) > cost
def is_car_available(car, ride, schedule):
    """A car can take `ride` iff appending it after each of its already
    scheduled orders would still finish before the ride's deadline.
    A car with no orders is always available."""
    return all(order.actual_end_t + ride.dist < ride.finish_t
               for order in schedule[car])
def ride_score(cur_time, car, ride, radar, bonus=0):
    """Score `car` would earn for `ride` starting at cur_time: the ride
    distance, plus `bonus` for an exactly-on-time pickup, or 0 when the
    ride would finish at/after its deadline."""
    car_coord = radar[car.id]
    time_to_reach = helpers.distance(car_coord, ride.coord_start)
    time_to_wait = max(0, ride.start_t - cur_time - time_to_reach)
    ride_distance = ride.dist
    actual_start_time = cur_time + time_to_reach + time_to_wait
    actual_end_time = actual_start_time + ride_distance
    if actual_start_time != ride.start_t:
        # Late pickup -> no bonus.
        bonus = 0
    if actual_end_time >= ride.finish_t:
        # Late delivery earns nothing.
        return 0
    return ride_distance + bonus
def ride_cost(car, ride, radar):
    """Total time `car` would spend on `ride` -- travel to the pickup
    point, waiting for the start window, then the ride itself.
    Returns (cost, actual_start_t, actual_end_t).
    NOTE(review): appears unused; pick_car inlines this computation."""
    car_coord = radar[car.id]
    time_to_reach = helpers.distance(car_coord, ride.coord_start)
    time_to_wait = max(0, ride.start_t - car.t - time_to_reach)
    ride_distance = ride.dist
    actual_start_t = car.t + time_to_reach + time_to_wait
    actual_end_t = actual_start_t + ride_distance
    cost = time_to_reach + time_to_wait + ride_distance
    return cost, actual_start_t, actual_end_t
def generate_result(filename, prio_fn):
    """Solve ../data/<filename> with the given ride-priority function
    and write the per-car ride-id lists to ../result/<filename>.txt."""
    meta, rides = parse_input.parse('../data/%s' % filename)
    t0 = time.time()
    schedule = main(meta, rides, prioritise_rides=prio_fn)
    print('Elapsed {:.4f}s'.format(time.time() - t0))
    items = []
    # One output row per car, ordered by car id.
    for car, orders in sorted(schedule.items(), key=lambda item: item[0].id):
        items.append([str(o.ride.id) for o in orders])
        # print('car %d -> %s' % (car.id, ', '.join([str(o.ride.id) for o in orders])))
    parse_output.write_output(items, '../result/%s.txt' % filename)
if __name__ == '__main__':
    # meta, rides = parse_input.parse('../data/a_example.in')
    # Per-dataset choice of the ride-ordering heuristic that scored
    # best for that input.
    names = [
        # file, rides_prio function
        ('a_example.in', prio_by_start_t),
        ('b_should_be_easy.in', prio_by_start_t),
        ('c_no_hurry.in', prio_by_distance),
        ('d_metropolis.in', prio_by_finish_t),
        ('e_high_bonus.in', prio_by_start_t)
    ]
    # filename = 'a_example.in'
    # filename = 'b_should_be_easy.in'
    # filename = 'c_no_hurry.in'
    # filename = 'd_metropolis.in'
    # filename = 'e_high_bonus.in'
    for filename, prio_fn in names:
        print('-' * 100)
        print('Generate result for %s' % filename)
        generate_result(filename, prio_fn)
        # generate_result(filename, prio_by_finish_t)
| true |
d0f9875ac79f97472f747c291f1dfb987405aaf0 | Python | AhmedRaafat14/CodeForces-Div.2A | /499A - WatchingAMovie.py | UTF-8 | 2,353 | 3.875 | 4 | [] | no_license | '''
You have decided to watch the best moments of some movie.
There are two buttons on your player:
-- Watch the current minute of the movie. By pressing this button,
you watch the current minute of the movie and the player automatically
proceeds to the next minute of the movie.
-- Skip exactly x minutes of the movie (x is some fixed positive integer).
If the player is now at the t-th minute of the movie,
then as a result of pressing this button, it proceeds to
the minute (t + x).
Initially the movie is turned on in the player on the first minute,
and you want to watch exactly n best moments of the movie,
the i-th best moment starts at the li-th minute and ends at
the ri-th minute (more formally, the i-th best moment consists of minutes:
li, li + 1, ..., ri).
Determine, what is the minimum number of minutes of the movie you
have to watch if you want to watch all the best moments?
====================================
Input
The first line contains two space-separated integers n, x
(1 ≤ n ≤ 50, 1 ≤ x ≤ 105) — the number of the best moments of
the movie and the value of x for the second button.
The following n lines contain the descriptions of the best moments of
the movie, the i-th line of the description contains two integers
separated by a space li, ri (1 ≤ li ≤ ri ≤ 105).
It is guaranteed that for all integers i from 2 to n the following
condition holds: ri - 1 < li.
====================================
Output
Output a single number — the answer to the problem.
====================================
Sample test(s)
Input
2 3
5 6
10 12
Output
6
Input
1 1
1 100000
Output
100000
'''
## Running Time ====>>>> 108 ms
header = input().split()
N, X = int(header[0]), int(header[1])
# Read the N best moments as [l, r] pairs.
best = [list(map(int, input().split())) for _ in range(N)]
minute = 1
watched = 0
# For each moment: press "skip X" while that still lands at or before
# the moment's start, then watch minute-by-minute until the moment ends.
for left, right in best:
    while minute + X <= left:
        minute += X
    if minute <= left:
        while minute <= right:
            watched += 1
            minute += 1
print(watched)
| true |
64d02da25568c4f49e97655dd5fba87cada726ef | Python | rorycodinstuff/DSM-artefact | /Python/regexSearch.py | UTF-8 | 935 | 3.3125 | 3 | [] | no_license | #imports
import re, shelve, pyperclip, sys, os
file_list = []
stored_text = []
# Get folder and user regex
folder_name = input('Enter a folder filepath. Add a slash at the end of the path.\n')
search_term = input('Enter an expression you wish to search for.\n')
sregex = re.compile(search_term, re.I)
# Check if folder exists
if os.path.isdir(folder_name) == False:
    print('The folder you specified does not exist.')
else:
    file_list = os.listdir(folder_name)
    # BUGFIX: the original kept only the first line of each file
    # (readlines()[0]) -- so matches beyond line 1 were never found,
    # it crashed on empty files, leaked every file handle, and crashed
    # on sub-directories.  Read each regular file completely instead.
    searched_files = []
    for file in file_list:
        file_path = os.path.join(folder_name, file)
        if not os.path.isfile(file_path):
            continue  # skip sub-directories and other non-files
        with open(file_path) as fh:
            stored_text.append(fh.read())
        searched_files.append(file)
    # Search for the user-supplied regular expression and print results.
    for name, text in zip(searched_files, stored_text):
        mo = sregex.search(text)
        if mo:
            print('"' + mo.group() + '" found in ' + name)
| true |
8c36859f9a1ace1d639725301f2161ba0b9f747a | Python | M1ky/Daily-Coding-Problem | /daily_temperatures.py | UTF-8 | 712 | 4.3125 | 4 | [
"MIT"
] | permissive | '''
Given a list of daily temperatures T, return a list such that, for each day in the input,
tells you how many days you have to wait until the temperature will be warmer.
If there is no such day, put 0.
'''
def days_till_warmer_temperature(arr):
    """For each day i, return how many days pass until a strictly
    warmer temperature; 0 when no warmer day follows.

    Monotonic-stack solution, O(n) time / O(n) space: the stack holds
    indices whose warmer day is still unknown, with non-increasing
    temperatures from bottom to top.  (The original re-tested
    temperature > arr[oldIndex] inside the loop, but the while
    condition already guarantees it, so that if/else was dead code.)
    """
    out = [0] * len(arr)
    stack = []  # indices awaiting a warmer day
    for index, temperature in enumerate(arr):
        while stack and arr[stack[-1]] < temperature:
            prev = stack.pop()
            out[prev] = index - prev
        stack.append(index)
    return out


# Quick demo using the classic LeetCode 739 example.
test = [73, 74, 75, 71, 69, 72, 76, 73]
print(days_till_warmer_temperature(test))
ca8dca7831a84c20749c4fe1ef252b470cb613a2 | Python | Aethiles/ppo-pytorch | /test/helpers/parameters.py | UTF-8 | 1,405 | 2.640625 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple
class TestParameters(nn.Module):
    """Small two-head linear fixture model: a sigmoid head of width
    output_size and a scalar ReLU head, with counters recording how
    many forward passes and gradient updates were performed."""

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 device: torch.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
                 ):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        # Two independent heads over the flattened input.
        self.linear1 = nn.Linear(input_size, output_size)
        self.linear2 = nn.Linear(input_size, 1)
        # Bookkeeping counters.
        self.forward_ctr = 0
        self.update_ctr = 0
        self.device = device
        self.to(device)
        # Optimiser is created after moving parameters to the device.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=2.5e-4)

    def forward(self,
                x: torch.Tensor,
                ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (sigmoid(linear1(x)), relu(linear2(x))), flattening x
        to (-1, input_size) first, and bump the forward counter."""
        flat = x.view(-1, self.input_size)
        scalar_head = F.relu(self.linear2(flat))
        main_head = torch.sigmoid(self.linear1(flat))
        self.forward_ctr += 1
        return main_head, scalar_head

    def gradient_update(self,
                        loss: torch.Tensor,
                        clip: bool = False,
                        ):
        """Take one Adam step on `loss`; optionally clip the global
        gradient norm to 0.5 before stepping."""
        self.optimizer.zero_grad()
        loss.backward()
        if clip:
            torch.nn.utils.clip_grad_norm_(self.parameters(), 0.5)
        self.optimizer.step()
        self.update_ctr += 1
| true |
a4422a501f73501bbce2832a95fee36b98f58720 | Python | thcjylh/JC_Calculate | /GB8076.py | UTF-8 | 4,131 | 2.8125 | 3 | [] | no_license | import random
import math
import four_homes_and_six_entries as f
import numpy
from decimal import Decimal
def bleeding_water(b, c_type, w, g): # bleeding rate (GB 8076)
    """Simulate a bleeding-rate measurement: randomise the target rate
    b (+/-10 %), pick container/specimen masses for the concrete type,
    then back-compute the bled water volume and the reported rate.
    c_type: 0 = reference concrete, 1 = concrete with admixture.
    NOTE(review): f.xy appears to round a value to the given step --
    confirm against four_homes_and_six_entries."""
    b = random.randint(round(b * 9), round(b * 11)) / 10 # bleeding rate, percent
    g0 = 800
    gw = 9900
    if c_type == 0: # reference concrete
        g0 = random.randint(798, 812) # container (cylinder) mass
        gw = random.randint(9900, 9910) # specimen mass
    elif c_type == 1: # concrete with admixture
        g0 = random.randint(1595, 1618) # container (cylinder) mass
        gw = random.randint(9948, 9960) # specimen mass
    w = float(w)
    g = float(g)
    # Bled water volume implied by the randomised rate, then the rate
    # recomputed from that rounded volume (as it would be reported).
    vw = f.xy(Decimal(b) / (Decimal(100) / ((Decimal(w) / Decimal(g)) * Decimal(gw))), 1)
    b = f.xy(Decimal(vw) / (Decimal(w) / Decimal(g)) / Decimal(gw) * Decimal(100), 0.1)
    return g0, g0 + gw, gw, vw, b
def setting_time(s_time, h, m, c_type): # setting-time computation
    """Simulate penetration-resistance readings over time.
    Fits a log-log line through (first reading, 0.2-0.3 MPa) and
    (target setting time, 3.5 MPa), then walks along it adding noisy
    readings until 3.5 MPa (stored as 350, i.e. MPa x 100) is reached.
    c_type: 0 = reference, 1 = retarding admixture, 2 = early-strength.
    Returns (times as "H:MM" strings offset by h:m, resistance values).
    """
    s_time = random.randint(s_time - 5, s_time + 5)
    f_time = 180
    if c_type == 0: # reference concrete
        f_time = 180
    elif c_type == 1: # admixture (retarding)
        f_time = 300
    elif c_type == 2: # admixture (early strength)
        f_time = 90
    test_strength = random.randint(2, 3) / 10 # first reading: 0.2-0.3 MPa
    k = (math.log(s_time) - math.log(f_time)) / (math.log(3.5) - math.log(test_strength)) # slope of the reference curve
    b = math.log(f_time) - k * math.log(test_strength) # intercept of the reference curve
    x = ['0'] # measurement times to output (placeholder first entry)
    y = [0] # measured resistances to output (placeholder first entry)
    while test_strength < 350:
        temp = int(math.e ** ((math.log(f_time) - b) / k) * 100) # resistance on the reference curve at the current time
        test_strength = random.randint(temp - 10, temp + 15) # randomise the reading
        if test_strength < 15:
            test_strength = random.randint(temp, temp + 15) # re-draw upward to keep tiny readings sensible
        test_strength = f.xy(test_strength, 10) # round the reading
        if test_strength > y[-1]: # only record when the resistance grew
            x.append(str((f_time + h * 60 + m) // 60) + ':' + '{:0>2d}'.format((f_time + h * 60 + m) % 60))
            y.append(test_strength)
        if y[-1] <= 250: # below 2.5 MPa: next reading in 30 min
            f_time = f_time + 30
        elif 250 < y[-1] < 350: # 2.5-3.5 MPa: next reading in 15 min
            f_time = f_time + 15
    del x[0] # drop the placeholder first entry
    del y[0] # drop the placeholder first entry
    return x, y
def air_content(ag): # air content
    """Simulate an air-content measurement: two aggregate readings,
    a randomised (+/-10 %) uncorrected measurement split into two
    readings around its mean, and the aggregate-corrected result."""
    a01 = random.randint(10, 14) / 10 # aggregate air content, 1st reading
    a02 = random.randint(10, 14) / 10 # aggregate air content, 2nd reading
    a0 = f.xy((Decimal(a01) + Decimal(a02)) / 2, 0.1) # aggregate air content (mean)
    ag = ag * 10
    ag_min = round(ag * 0.9)
    ag_max = round(ag * 1.1)
    ag = (random.randint(ag_min, ag_max) + a0 * 10) / 10 # measured, uncorrected air content
    x = random.randint(0, 2) / 10 # deviation from the mean
    ag1 = f.xy(Decimal(ag) - Decimal(x), 0.1)
    ag2 = f.xy(Decimal(ag) + Decimal(x), 0.1)
    ag = f.xy((Decimal(ag1) + Decimal(ag2)) / 2, 0.1)
    a = f.xy(Decimal(ag) - Decimal(a0), 0.1) # corrected: measured minus aggregate
    return a01, a02, a0, ag1, ag2, ag, a
def shrinkage(ratio):
    """Simulate a shrinkage measurement for a design shrinkage of
    `ratio` microstrain: random initial reading, back-computed final
    length, then the strain recomputed from the rounded length."""
    l0 = random.randint(3500, 3900) / 1000 # initial dial reading of specimen length
    lb = 470 # gauge length of the specimen
    epsilon_st_d = ratio * 10 ** -6 # design shrinkage strain
    lt = f.xy(Decimal(l0) - (Decimal(epsilon_st_d) * Decimal(lb)), 0.001) # measured specimen length
    epsilon_st_t = f.xy((Decimal(l0) - Decimal(lt)) / Decimal(lb) * (Decimal(10) ** Decimal(6)), 0.1) # reported strain (microstrain)
    return l0, lt, epsilon_st_t
def strength(s_mpa):
    """Simulate a compressive-strength test: convert the target
    strength to a load, draw three noisy (+/-7 %) loads rounded to
    0.5 kN, convert each back to MPa, and average them.
    0.095 MPa/kN is the press conversion factor used throughout."""
    s_kn = round(s_mpa / 0.095, 1) # convert the target strength to a load (kN)
    s_min = int(s_kn * 0.93 * 10) # smallest load to draw (tenths of kN)
    s_max = int(s_kn * 1.07 * 10) # largest load to draw (tenths of kN)
    kn = [] # loads to output
    mpa = [] # strengths to output
    mpa_aver = 0
    for i in range(3): # three specimens
        temp = f.xy(random.randint(s_min, s_max) / 10, 0.5)
        kn.append(temp)
        mpa.append(f.xy(Decimal(temp) * Decimal(0.095), 0.1))
    mpa_aver = f.xy(numpy.mean(mpa), 0.1)
    return kn, mpa, mpa_aver
| true |
4241f6cdd443fdaed0b56a74afa10aaa23f650a6 | Python | solomongarber/MarkovLowPass | /medQueue.py | UTF-8 | 696 | 2.75 | 3 | [] | no_license | import numpy as np
class medQueue:
    """Fixed-length ring buffer of video frames that yields the
    per-pixel temporal median frame (a simple low-pass filter)."""

    def __init__(self, support, frame_shape, num_pixels, num_channels):
        """support: number of frames kept; frame_shape: shape returned
        by get_median; num_pixels * num_channels must equal the size of
        one frame."""
        self.frames = np.zeros((num_pixels, num_channels, support), dtype=np.uint8)
        # Pre-fill every other slot with 255, presumably to bias the
        # start-up median toward white -- TODO confirm intent.
        # BUGFIX: integer division; range(support / 2) raised TypeError
        # on Python 3 because support / 2 is a float.
        for i in range(support // 2):
            self.frames[:, :, i * 2] = 255
        self.frame_shape = frame_shape
        self.ind = 0
        self.support = support
        self.num_pixels = num_pixels
        self.num_channels = num_channels

    def add_frame(self, frame):
        """Overwrite the oldest slot with `frame` (flattened to
        (num_pixels, num_channels)) and advance the ring index."""
        self.frames[:, :, self.ind] = np.reshape(frame, (self.num_pixels, self.num_channels))
        self.ind = np.mod(self.ind + 1, self.support)

    def get_median(self):
        """Return the per-pixel median over the stored frames, reshaped
        back to frame_shape."""
        ans = np.median(self.frames, 2)
        return np.reshape(ans, self.frame_shape)
| true |
b17591755c36b65d8194736701d10caeb5d11d60 | Python | chgad/numerical_methods | /exercise_1/smallest_number.py | UTF-8 | 783 | 3.84375 | 4 | [] | no_license | import numpy as np
# Stray debug statement: prints the bytes literal b'0' at import time.
print(b'0')
def produce_smalest(precision=float):
    """
    precision: float type/constructor used to represent the value,
               e.g. float (double precision) or np.float32 (single).
    return: the largest exponent e such that precision(2.0 ** -e) is
            still non-zero, i.e. 2**-e is the smallest representable
            (subnormal) power of two for that precision.

    The original docstring claimed the *smallest* representable
    exponent and threaded the answer through an awkward l-2 offset;
    the loop below states the stopping condition directly.
    """
    exponent = 1
    while precision(2.0 ** -exponent) > 0.0:
        exponent += 1
    # precision(2**-exponent) underflowed to zero; the previous
    # exponent was the last representable one.
    return exponent - 1
# Report the smallest representable power of two for single and double
# precision.  (The "samllest"/"smalest" typos in the printed strings
# are left as-is; fixing them would change the program's output.)
exponent = produce_smalest(precision=np.float32) # smallest power of two
# representable with single precision
print("2^-{} is the samllest number which can be represented using single precision".format(exponent), np.float32(2**-exponent))
exponent=produce_smalest() # smallest power of two with double precision
print("2^-{} is the samllest number which can be represented using double precision".format(exponent), 2.0**-exponent)
| true |
a174a84825a842f23691f6d7e93f3b0d6c43f3f1 | Python | timedata-org/expressy | /expressy/units.py | UTF-8 | 1,875 | 2.515625 | 3 | [
"MIT"
] | permissive | from . import expression, quotes
import keyword, functools, re
"""
This module is a hack to find Pint units in expressions and replace them
with a call to parse that string as a Pint expression.
See https://github.com/hgrecco/pint for more information about Pint.
"""
PINT_MATCH = r"""
( -? \d+ (?: \.\d* )? ) # A number with an optional decimal.
( \s* \w+ ) # A unit.
(?: ( \s* [ /] ) ( \s* \w+ ) )? # A separator and a unit.
(?: ( \s* [ /] ) ( \s* \w+ ) )? # If I use * to repeat these, it only
(?: ( \s* [ /] ) ( \s* \w+ ) )? # captures the last one...
(?: ( \s* [ /] ) ( \s* \w+ ) )?
(?: ( \s* [ /] ) ( \s* \w+ ) )?
"""
# Compiled once at import time; re.VERBOSE lets the pattern carry its own comments.
PINT_MATCH_RE = re.compile(PINT_MATCH, re.VERBOSE)
# pint is optional at import time; inject() below needs it only when called.
try:
    import pint
except ImportError:  # pragma: no cover
    pint = None
def process_units(s, processor):
    """Run `processor` over every unit-like substring of `s` that sits
    outside quoted sections, leaving everything else untouched."""
    def _rewrite(match):
        text = match.group(0)
        parts = [(g or '').strip() for g in match.groups()]
        if any(keyword.iskeyword(p) for p in parts):
            # A Python keyword can never be a unit name.
            return text
        if not any(p.isalpha() for p in parts):  # pragma: no cover
            return text
        return processor(text)
    return quotes.process_unquoted(s, lambda chunk: PINT_MATCH_RE.sub(_rewrite, chunk))
def inject(maker, definitions=None, injected_name='pint'):
    """Wrap `maker` so unit literals in an expression are parsed by a Pint
    registry exposed under `injected_name`.

    definitions: optional iterable of extra Pint unit definitions.
    Returns a callable that compiles an expression string.
    """
    registry = pint.UnitRegistry()
    if definitions:
        for definition in definitions:
            registry.define(definition)

    def parse(text):
        # Resolve a unit expression and normalize it to base units.
        return registry.parse_expression(text).to_base_units()

    def symbols(name):
        if name == injected_name:
            return parse
        return maker.symbols(name)

    wrapped_maker = expression.Maker(maker.is_constant, symbols)

    def call(source):
        rewritten = process_units(source, lambda u: "%s('%s')" % (injected_name, u))
        return wrapped_maker(rewritten)

    return call
| true |
70c4eada6425edfbbf949651e3a9deb5c7e92c6e | Python | shoppon/leetcode | /leetcode/strings/lc_567.py | UTF-8 | 1,561 | 3.28125 | 3 | [] | no_license | from collections import defaultdict
class Solution:
    """LeetCode 567 (Permutation in String): decide whether some permutation
    of s1 occurs as a contiguous substring of s2."""
    def checkInclusion1(self, s1: str, s2: str) -> bool:
        """Sliding-window variant charging window positions against a
        per-character budget taken from s1.
        NOTE(review): the queue/used bookkeeping is intricate; the
        frequency-table variant checkInclusion below is the simpler reference.
        """
        freq = defaultdict(int)    # remaining budget of each character from s1
        queue = defaultdict(list)  # window indices currently charged to each character
        for s in s1:
            freq[s] += 1
        used = [0] * len(s2)       # used[i] == 1 when s2[i] is charged against the budget
        s1_len = len(s1)
        count = s1_len             # characters of s1 still unmatched in the window
        for i, s in enumerate(s2):
            # re-credit the character that just slid out of the window
            pre = i-s1_len
            if pre >= 0 and used[pre] == 1:
                freq[s2[pre]] += 1
                count += 1
                used[pre] = 0
                queue[s2[pre]].pop(0)
            # no budget left for s: move the earliest charged position forward
            if freq[s] == 0 and queue[s]:
                queue[s].append(i)
                pre = queue[s].pop(0)
                used[pre] = 0
                used[i] = 1
            # budget available: consume one unit for this position
            if freq[s] > 0:
                freq[s] -= 1
                count -= 1
                used[i] = 1
                queue[s].append(i)
            if count == 0:
                return True
        return False
    def checkInclusion(self, s1: str, s2: str) -> bool:
        """Fixed-size window compared through two 26-letter frequency tables
        (assumes lowercase a-z input, per the seeded keys below)."""
        if len(s1) > len(s2):
            return False
        freq1 = {}
        freq2 = {}
        # pre-seed all 26 keys so dict equality works without missing keys
        for s in ('abcdefghijklmnopqrstuvwxyz'):
            freq1[s], freq2[s] = 0, 0
        # initial window: the first len(s1) characters of s2
        for _s1, _s2 in zip(s1, s2):
            freq1[_s1] += 1
            freq2[_s2] += 1
        if freq1 == freq2:
            return True
        s1_len = len(s1)
        # slide the window one character at a time
        for i in range(s1_len, len(s2)):
            freq2[s2[i]] += 1
            freq2[s2[i-s1_len]] -= 1
            if freq1 == freq2:
                return True
        return False
| true |
5f3226075fb25cd223efa3fb84b6f81914563894 | Python | s-kostyuk/everpl | /dpl/utils/observer.py | UTF-8 | 811 | 3.1875 | 3 | [
"MIT"
] | permissive | from typing import TypeVar, Generic
T = TypeVar('T')  # type of the event source passed to Observer.update
class Observer(Generic[T]):
    """Abstract base for the Observer side of the Observer pattern.

    Concrete subclasses receive event notifications through ``update``.
    """

    def update(self, source: T, *args, **kwargs) -> None:
        """Handle a new event emitted by an Observable.

        :param source: mandatory, a weak reference to the object that
               emitted the event
        :param args: optional positional arguments with additional
               information about the emitted event
        :param kwargs: optional keyword arguments with additional
               information about the emitted event
        :return: None
        """
        raise NotImplementedError()
| true |
0f8066ba53c82e6dd43635308eef510cf1b8cecd | Python | luohoward/leetcode | /codec2.py | UTF-8 | 1,598 | 3.40625 | 3 | [] | no_license | class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
queue = [root]
ans = []
while len(queue) != 0:
node = queue.pop()
if not node:
ans.append('#')
else:
ans.append(str(node.val))
queue.insert(0, node.left)
queue.insert(0, node.right)
return ','.join(ans)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
print(data)
dataClean = data.split(",")
if dataClean[0] == '#':
return None
n = len(dataClean)
i = 0
root = TreeNode(int(dataClean[i]))
queueLevelNext = [root]
i = 1
while i < n:
queueLevelThis = [x for x in queueLevelNext]
queueLevelNext = []
while queueLevelThis:
node = queueLevelThis.pop(0)
if dataClean[i] != '#':
node.left = TreeNode(int(dataClean[i]))
queueLevelNext.append(node.left)
i = i+1
if dataClean[i] != '#':
node.right = TreeNode(int(dataClean[i]))
queueLevelNext.append(node.right)
i = i+1
return root | true |
92463c1eb69e28b1f19b9900c30df81c2894835b | Python | puspita-sahoo/codechef_program | /prime.py | UTF-8 | 122 | 3.28125 | 3 | [] | no_license | n = 7
# Trial division: n (defined above) is prime iff no i in [2, n) divides it.
# Initialize `prime` before the loop: the original set it inside the loop
# body, so n <= 2 (empty range) left it undefined and print raised NameError.
prime = 'yes' if n >= 2 else 'no'
for i in range(2, n):
    if n % i == 0:
        prime = 'no'
        break
print(prime)
7c6b1f5cfa6458bf826eebc03e749ad05b1a7257 | Python | BackupTheBerlios/pyimtool-svn | /PyRAF-Aqua/pyraf/lib/clcache.py | UTF-8 | 10,137 | 2.609375 | 3 | [
"BSD-2-Clause"
] | permissive | """clcache.py: Implement cache for Python translations of CL tasks
$Id: clcache.py,v 1.1 2003/10/08 18:33:12 dencheva Exp $
R. White, 2000 January 19
"""
import os, sys, types, string
import filecache
from irafglobals import Verbose, userIrafHome, pyrafDir
# set up pickle so it can pickle code objects
# (Python 2: copy_reg/cPickle; marshal serializes the raw code-object bytes)
import copy_reg, marshal, types
try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle
def code_unpickler(data):
    # reconstruct a code object from its marshal-serialized bytes
    return marshal.loads(data)
def code_pickler(code):
    # reduce a code object to (constructor, (marshal bytes,)) for pickling
    return code_unpickler, (marshal.dumps(code),)
copy_reg.pickle(types.CodeType, code_pickler, code_unpickler)
# Code cache is implemented using a dictionary clFileDict and
# a list of persistent dictionaries (shelves) in cacheList.
#
# - clFileDict uses CL filename as the key and has
# the md5 digest of the file contents as its value.
# The md5 digest is automatically updated if the file changes.
#
# - the persistent cache has the md5 digest as the key
# and the Pycode object as the value.
#
# This scheme allows files with different path names to
# be found in the cache (since the file contents, not the
# name, determine the shelve key) while staying up-to-date
# with changes of the CL file contents when the script is
# being developed.
import dirshelve, stat, md5
_versionKey = 'CACHE_VERSION'  # special shelve key holding the cache format version
_currentVersion = "v1"         # bump when the on-disk cache format changes
class _FileContentsCache(filecache.FileCacheDict):
    """Maps CL filename -> md5 digest of its contents, refreshed on change."""
    def __init__(self):
        # create file dictionary with md5 digest as value
        filecache.FileCacheDict.__init__(self,filecache.MD5Cache)
class _CodeCache:
    """Python code cache class

    Maps md5 digest of a CL script's contents -> compiled Pycode object,
    persisted across a list of shelve databases (user cache first, system
    cache last).

    Note that old out-of-date cached code never gets
    removed in this system.  That's because another CL
    script might still exist with the same code.  Need a
    utility to clean up the cache by looking for unused keys...
    """
    def __init__(self, cacheFileList):
        # each cacheList entry is a (writeflag, shelve) pair from _cacheOpen
        cacheList = []
        flist = []
        nwrite = 0
        for file in cacheFileList:
            db = self._cacheOpen(file)
            if db is not None:
                cacheList.append(db)
                nwrite = nwrite+db[0]
                flist.append(file)
        self.clFileDict = _FileContentsCache()
        self.cacheList = cacheList
        self.cacheFileList = flist
        self.nwrite = nwrite
        # flag indicating preference for system cache
        self.useSystem = 0
        if not cacheList:
            self.warning("Unable to open any CL script cache, "
                "performance will be slow")
        elif nwrite == 0:
            self.warning("Unable to open any CL script cache for writing")
    def _cacheOpen(self, filename):
        """Open shelve database in filename and check version

        Returns tuple (writeflag, shelve-object) on success or None on failure.
        An obsolete-version cache is renamed aside and recreated when writable.
        """
        # first try opening the cache read-write
        try:
            fh = dirshelve.open(filename)
            writeflag = 1
        except dirshelve.error:
            # initial open failed -- try opening the cache read-only
            try:
                fh = dirshelve.open(filename,"r")
                writeflag = 0
            except dirshelve.error:
                self.warning("Unable to open CL script cache file %s" %
                    (filename,))
                return None
        # check version of cache -- don't use it if out-of-date
        if fh.has_key(_versionKey):
            oldVersion = fh[_versionKey]
        elif len(fh) == 0:
            fh[_versionKey] = _currentVersion
            oldVersion = _currentVersion
        else:
            oldVersion = 'v0'
        if oldVersion == _currentVersion:
            return (writeflag, fh)
        # open succeeded, but version looks out-of-date
        fh.close()
        rv = None
        msg = ["CL script cache file is obsolete version (old %s, current %s)" %
            (`oldVersion`, `_currentVersion`)]
        if not writeflag:
            # we can't replace it if we couldn't open it read-write
            msg.append("Ignoring obsolete cache file %s" % filename)
        else:
            # try renaming the old file and creating a new one
            rfilename = filename + "." + oldVersion
            try:
                os.rename(filename, rfilename)
                msg.append("Renamed old cache to %s" % rfilename)
                try:
                    # create new cache file
                    fh = dirshelve.open(filename)
                    fh[_versionKey] = _currentVersion
                    msg.append("Created new cache file %s" % filename)
                    rv = (writeflag, fh)
                except dirshelve.error:
                    msg.append("Could not create new cache file %s" % filename)
            except OSError:
                msg.append("Could not rename old cache file %s" % filename)
        self.warning(string.join(msg,"\n"))
        return rv
    def warning(self, msg, level=0):
        """Print warning message to stderr, using verbose flag"""
        if Verbose >= level:
            sys.stdout.flush()
            sys.stderr.write(msg + "\n")
            sys.stderr.flush()
    def writeSystem(self, value=1):
        """Add scripts to system cache instead of user cache"""
        # the system cache is the last entry in cacheList
        if value==0:
            self.useSystem = 0
        elif self.cacheList:
            writeflag, cache = self.cacheList[-1]
            if writeflag:
                self.useSystem = 1
            else:
                self.warning("System CL script cache is not writable")
        else:
            self.warning("No CL script cache is active")
    def close(self):
        """Close all cache files"""
        for writeflag, cache in self.cacheList:
            cache.close()
        self.cacheList = []
        self.nwrite = 0
        # Note that this does not delete clFileDict since the
        # in-memory info for files already read is still OK
        # (Just in case there is some reason to close cache files
        # while keeping _CodeCache object around for future use.)
    def __del__(self):
        self.close()
    def getIndex(self, filename, source=None):
        """Get cache key for a file or filehandle"""
        if filename:
            return self.clFileDict.get(filename)
        elif source:
            # there is no filename, but return md5 digest of source as key
            return md5.new(source).digest()
    def add(self, index, pycode):
        """Add pycode to cache with key = index.  Ignores if index=None."""
        if index is None or self.nwrite==0: return
        if self.useSystem:
            # system cache is last in list
            cacheList = self.cacheList[:]
            cacheList.reverse()
        else:
            cacheList = self.cacheList
        # write to the first writable cache in the chosen order
        for writeflag, cache in cacheList:
            if writeflag:
                cache[index] = pycode
                return
    def get(self, filename, mode="proc", source=None):
        """Get pycode from cache for this file.

        Returns tuple (index, pycode).  Pycode=None if not found
        in cache.  If mode != "proc", assumes that the code should not be
        cached.
        """
        if mode != "proc": return None, None
        index = self.getIndex(filename, source=source)
        if index is None: return None, None
        for i in range(len(self.cacheList)):
            writeflag, cache = self.cacheList[i]
            if cache.has_key(index):
                pycode = cache[index]
                #XXX
                # kluge for backward compatibility -- force type of object
                # eliminate this eventually
                if not hasattr(pycode, 'setFilename'):
                    import cl2py
                    pycode.__class__ = cl2py.Pycode
                    if hasattr(pycode, 'filename'):
                        del pycode.filename
                    # replace outmoded object in the cache
                    if writeflag:
                        cache[index] = pycode
                #XXX
                pycode.index = index
                pycode.setFilename(filename)
                return index, pycode
        return index, None
    def remove(self, filename):
        """Remove pycode from cache for this file or IrafTask object.

        This deletes the entry from the shelve persistent database, under
        the assumption that this routine may be called to fix a bug in
        the code generation (so we don't want to keep the old version of
        the Python code around.)
        """
        if type(filename) is not types.StringType:
            try:
                task = filename
                filename = task.getFullpath()
            except (AttributeError, TypeError):
                raise TypeError(
                    "Filename parameter must be a string or IrafCLTask")
        index = self.getIndex(filename)
        # system cache is last in list
        irange = range(len(self.cacheList))
        if self.useSystem: irange.reverse()
        nremoved = 0
        for i in irange:
            writeflag, cache = self.cacheList[i]
            if cache.has_key(index):
                if writeflag:
                    del cache[index]
                    self.warning("Removed %s from CL script cache %s" % \
                        (filename,self.cacheFileList[i]), 2)
                    nremoved = nremoved+1
                else:
                    self.warning("Cannot remove %s from read-only "
                        "CL script cache %s" % \
                        (filename,self.cacheFileList[i]))
        if nremoved==0:
            self.warning("Did not find %s in CL script cache" % filename, 2)
# create code cache
# Module-level singleton: a writable per-user cache under userIrafHome plus
# the (usually read-only) system cache shipped alongside pyraf.
userCacheDir = os.path.join(userIrafHome,'pyraf')
if not os.path.exists(userCacheDir):
    try:
        os.mkdir(userCacheDir)
        print 'Created directory %s for cache' % userCacheDir
    except OSError:
        print 'Could not create directory %s' % userCacheDir
dbfile = 'clcache'
codeCache = _CodeCache([
    os.path.join(userCacheDir,dbfile),
    os.path.join(pyrafDir,dbfile),
    ])
del userCacheDir, dbfile
| true |
5007f6e20ce092d26787a56cbf5c21648903242b | Python | ashutosh-narkar/LeetCode | /add_numbers.py | UTF-8 | 1,000 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env python
'''
You are given two linked lists representing two non-negative numbers.
The digits are stored in reverse order and each of their nodes contain a single digit.
Add the two numbers and return it as a linked list.
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
'''
class ListNode(object):
    """Singly-linked list node holding one decimal digit."""
    def __init__(self, x):
        self.val = x      # digit stored in this node
        self.next = None  # next node, or None at the tail


def add_two_numbers(l1, l2):
    """Add two non-negative numbers stored as reversed-digit linked lists.

    Each node holds one digit, least significant first.  Returns the head
    of a new list holding the digit-wise sum.
    """
    carry = 0
    # dummy head simplifies appending; the result starts at dummy.next
    dummy = ListNode(0)
    current = dummy
    while l1 or l2:
        num1, num2 = 0, 0
        if l1:
            num1 = l1.val
            l1 = l1.next
        if l2:
            num2 = l2.val
            l2 = l2.next
        # divmod keeps the carry an int: the original `num / 10` is float
        # division in Python 3, poisoning every subsequent digit.
        carry, digit = divmod(num1 + num2 + carry, 10)
        current.next = ListNode(digit)
        current = current.next
    # a leftover carry adds one final digit
    if carry > 0:
        current.next = ListNode(carry)
    return dummy.next
| true |
6acab6088a14506cbf0d3d8ab73b395f01cf07f6 | Python | mpyrev/checkio | /secret-message.py | UTF-8 | 109 | 2.90625 | 3 | [] | no_license | def find_message(text):
"""Find a secret message"""
return ''.join([c for c in text if c.isupper()])
| true |
4dae20f84c55fe4dba01ea37a021e39ed54ba462 | Python | EmanuelaMollova/CreeperPP | /creeper_pp/personality_predictor.py | UTF-8 | 2,230 | 2.59375 | 3 | [] | no_license | from sklearn import svm
import numpy as np
import re
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsRegressor
class PersonalityPredictor(object):
    """k-NN regressors, one per o/c/e/a/n trait (presumably the Big Five --
    confirm with callers), trained on per-user feature vectors.
    Python 2 code: note the `basestring` check in make_float."""
    def __init__(self, nn):
        # nn: number of neighbors used by every KNeighborsRegressor
        self.nn = nn
        self.o_clf = KNeighborsRegressor(n_neighbors=self.nn)
        self.c_clf = KNeighborsRegressor(n_neighbors=self.nn)
        self.e_clf = KNeighborsRegressor(n_neighbors=self.nn)
        self.a_clf = KNeighborsRegressor(n_neighbors=self.nn)
        self.n_clf = KNeighborsRegressor(n_neighbors=self.nn)
        # training data accumulated by register(): one row per user
        self.features = []
        self.o_value = []
        self.c_value = []
        self.e_value = []
        self.a_value = []
        self.n_value = []
    def register(self, data):
        """Collect features and trait targets from a {user_id: record} dict.
        NOTE(review): stops at the first record lacking an 'f' key (`break`,
        not `continue`), so iteration order of `data` matters -- confirm.
        """
        for user_id in data:
            if 'f' in data[user_id]:
                self.o_value.append(self.make_float(data[user_id]['o']))
                self.c_value.append(self.make_float(data[user_id]['c']))
                self.e_value.append(self.make_float(data[user_id]['e']))
                self.a_value.append(self.make_float(data[user_id]['a']))
                self.n_value.append(self.make_float(data[user_id]['n']))
                self.features.append(data[user_id]['f'])
            else:
                break
    def make_float(self, value):
        """Coerce a trait value to float, stripping non-numeric characters
        from strings.  NOTE(review): the regex also strips '-', so negative
        string values lose their sign -- confirm values are non-negative.
        """
        if isinstance(value, basestring):
            return float(re.sub("[^0-9.]", "", value))
        else:
            return float(value)
    def train(self):
        """Normalize the feature matrix and fit all five regressors.
        Note: self.features is replaced by its normalized copy."""
        self.features = normalize(self.features)
        self.o_clf.fit(self.features, self.o_value)
        self.c_clf.fit(self.features, self.c_value)
        self.e_clf.fit(self.features, self.e_value)
        self.a_clf.fit(self.features, self.a_value)
        self.n_clf.fit(self.features, self.n_value)
    def predict(self, features):
        """Predict all five trait scores for one feature vector.
        NOTE(review): the input is not normalized here, unlike the training
        data in train() -- confirm callers pre-normalize."""
        o = self.o_clf.predict([features]).tolist()[0]
        c = self.c_clf.predict([features]).tolist()[0]
        e = self.e_clf.predict([features]).tolist()[0]
        a = self.a_clf.predict([features]).tolist()[0]
        n = self.n_clf.predict([features]).tolist()[0]
        return {
            'o': o,
            'c': c,
            'e': e,
            'a': a,
            'n': n
        }
| true |
3f404c13955c629300cb3e996f44d0a408b0ef32 | Python | momentum-team-4/python-word-freq-tleach01 | /word_frequency.py | UTF-8 | 1,411 | 3.390625 | 3 | [] | no_license | STOP_WORDS = [
'a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for', 'from', 'has', 'he',
'i', 'in', 'is', 'it', 'its', 'of', 'on', 'that', 'the', 'to', 'were',
'will', 'with'
]
def print_word_freq(file):
    """Read in `file` and print each word's frequency as `word | n ***`.

    Words are lowercased, punctuation is treated as whitespace, and
    STOP_WORDS are excluded.  Output is sorted by descending count.
    """
    from collections import Counter  # this file keeps imports out of module scope
    with open(file, 'r') as praise:
        poem = praise.read()
    # Treat punctuation as separators.  (The original chained .replace()
    # calls and tried to strip newlines with the typo "/n"; str.split()
    # already splits on any whitespace, newlines included.)
    cleaned = poem.lower()
    for mark in '-:,."':
        cleaned = cleaned.replace(mark, ' ')
    counts = Counter(w for w in cleaned.split() if w not in STOP_WORDS)
    for word, count in counts.most_common():
        print(word, '|', count, '*' * count)
if __name__ == "__main__":
import argparse
from pathlib import Path
parser = argparse.ArgumentParser(
description='Get the word frequency in a text file.')
parser.add_argument('file', help='file to read')
args = parser.parse_args()
file = Path(args.file)
if file.is_file():
print_word_freq(file)
else:
print(f"{file} does not exist!")
exit(1)
| true |
0fef7bc0688f885a845b928e1265490ee42a7898 | Python | pedrolucasmr/Algorithms_C_Sharp | /Services/ChartService(WIP).py | UTF-8 | 3,574 | 2.78125 | 3 | [] | no_license | import plotly.graph_objects as _plotly
import mysql.connector as _connector
from mysql.connector import Error
import sys
sys.path.append("../Data")
import datetime
import dbConfig
_dbConfig=dbConfig.readDbConfig()
def GetRuns():
    """Fetch and print every run via the GetAllRuns stored procedure."""
    try:
        connection=_connector.Connect(**_dbConfig)
        connection._open_connection()  # NOTE(review): private connector API; Connect() normally opens already -- confirm this call is needed
        cursor=connection.cursor()
        cursor.callproc("GetAllRuns")
        for result in cursor.stored_results():
            print(result.fetchall())
    except Error as e:
        print(e)
    finally:
        # NOTE(review): if Connect() itself raises, `cursor`/`connection`
        # are unbound here and this finally raises NameError.
        cursor.close()
        connection.close()
def GetSpecificRun(algorithm):
    """Print the result sets of the GetSpecificRuns stored procedure for one algorithm."""
    try:
        connection=_connector.connect(**_dbConfig)
        connection._open_connection()  # NOTE(review): private API -- see GetRuns
        cursor=connection.cursor()
        cursor.callproc("GetSpecificRuns",[algorithm])
        for result in cursor.stored_results():
            print(result)  # NOTE(review): prints the result-set object, not rows; likely meant result.fetchall()
    except Error as e:
        print(e)
    finally:
        # NOTE(review): unbound names here if connect() raised -- see GetRuns
        cursor.close()
        connection.close()
def GetRunTimeResults(algorithm):
    """Return the rows produced by the GetRunResults stored procedure, or None on error."""
    try:
        connection=_connector.connect(**_dbConfig)
        connection._open_connection()  # NOTE(review): private API -- see GetRuns
        cursor=connection.cursor(dictionary=True)
        results=cursor.callproc("GetRunResults",[algorithm,0,0])
        if(cursor.with_rows):
            # NOTE(review): callproc() returns the parameter tuple, which has
            # no fetchall(); rows should come from cursor.stored_results().
            return results.fetchall()
    except Error as e:
        print(e)
    finally:
        cursor.close()
        connection.close()
def GetAllRunsTimeResults():
    """Print and return all rows from the GetAllRunsResults stored procedure."""
    try:
        connection=_connector.connect(**_dbConfig)
        connection._open_connection()  # NOTE(review): private API -- see GetRuns
        cursor=connection.cursor()
        cursor.callproc("GetAllRunsResults")
        # NOTE(review): with mysql.connector, procedure rows normally come
        # from cursor.stored_results(), not fetchall() -- confirm this works.
        results=cursor.fetchall()
        for result in results:
            print(result)
        return results
    except Error as e:
        print(e)
    finally:
        cursor.close()
        connection.close()
def ArithmeticAverage(list):
    """Return the mean of each row's duration (column index 1), scaled by 1/1000.

    The original iterated the rows but then indexed `list[i]` with the row
    itself (TypeError on any non-empty input) and referenced an undefined
    `currentValue`; the apparent intent -- sign-correcting each duration --
    is implemented here with abs().  Debug prints removed.
    (Parameter keeps its original name `list` for interface compatibility,
    although it shadows the builtin.)
    """
    total = 0
    for row in list:
        total += abs(row[1])
    return (total / len(list)) / 1000
def ZeroToAHundred(x):
    """Return True when row x's input-size column (index 2) lies in [0, 99]."""
    # chained comparison replaces the redundant if/else-return-literal pattern
    return 0 <= x[2] <= 99
def AHundredToAThousand(x):
    """Return True when row x's input-size column (index 2) lies in [100, 999]."""
    return 100 <= x[2] <= 999
def AThousandToTenThousand(x):
    """Return True when row x's input-size column (index 2) lies in [1000, 9999]."""
    return 1000 <= x[2] <= 9999
def TenThousandToAHundredThousand(x):
    """Return True when row x's input-size column (index 2) lies in [10000, 99999]."""
    return 10000 <= x[2] <= 99999
def AHundredThousandToAMillion(x):
    """Return True when row x's input-size column (index 2) is >= 100000.
    (Despite the name, there is no upper bound.)"""
    return x[2] >= 100000
def SingleAlgorithmChart(algorithm):
    """Plot one algorithm's average runtime per input-size bucket as a bar chart."""
    results=GetRunTimeResults(algorithm)
    for i in results:
        print(i)  # debug dump of the raw rows
    # bucket rows by input size (column index 2): tens .. hundreds of thousands
    dozens=list(filter(ZeroToAHundred,results))
    hundreds=list(filter(AHundredToAThousand,results))
    thousands=list(filter(AThousandToTenThousand,results))
    dozensOfThousands=list(filter(TenThousandToAHundredThousand,results))
    hundredsofThousands=list(filter(AHundredThousandToAMillion,results))
    yAxis=[ArithmeticAverage(dozens),ArithmeticAverage(hundreds),ArithmeticAverage(thousands),ArithmeticAverage(dozensOfThousands),ArithmeticAverage(hundredsofThousands)]
    xAxis=["Dezenas","Centenas","Milhares","Dezenas de Milhares","Centenas de milhares"]
    figure=_plotly.Figure([_plotly.Bar(x=xAxis,y=yAxis)])
    figure.show()
SingleAlgorithmChart("Bubble Sort")
#GetRuns()
#GetSpecificRun("Bubble Sort")
#GetRunTimeResults("Bubble Sort")
#GetAllRunsTimeResults() | true |
3213667c7dec9db73dbaff0db0fd985fbe76bc68 | Python | warbear0129/Margin-Bot | /settings.py | UTF-8 | 2,876 | 2.625 | 3 | [] | no_license | import ConfigParser, os
from utils import *
class Settings(object):
    """Loads, creates and validates a per-pair trading config INI file.
    Python 2 code (ConfigParser module name, `print` statement in refresh).
    NOTE(review): `c` is a class attribute, shared by every Settings instance.
    """
    c = ConfigParser.ConfigParser()
    def __init__(self, pair):
        # config lives at ./config/<pair>.ini; created with defaults if absent
        self._path = "./config/%s.ini" % pair
        self.settings = self.parseConfig  # NOTE(review): missing (); harmless (overwritten below) -- consider deleting
        printInfo("Checking config file .....\n")
        if not os.path.isfile(self._path):
            printInfo("No config file ..... will create one")
            self.createConfig()
        self.settings = self.parseConfig()
        if not self.validateConfig():
            quit()
    def parseConfig(self):
        """Read the INI file; values parse as float when possible, else str."""
        settings = {}
        self.c.read(self._path)
        for o in self.c.options("Settings"):
            try:
                settings[o] = float(self.c.get("Settings", o))
            except:  # NOTE(review): bare except -- narrow to ValueError
                settings[o] = self.c.get("Settings", o)
        return settings
    def createConfig(self):
        """Write a default config, then exit so the user can edit it."""
        with open(self._path, 'w') as f:
            self.c.add_section('Settings')
            self.c.set('Settings', 'candlestickPeriod', 1800)
            self.c.set('Settings', 'candlestickHours', 6)
            self.c.set('Settings', 'ema', 4)
            self.c.set('Settings', 'maxBalance', 0.85)
            self.c.set('Settings', 'profitMargin', 1.01)
            self.c.set('Settings', 'longMargin', 0.985)
            self.c.set('Settings', 'shortMargin', 1.02)
            self.c.set('Settings', 'delta', 0.9)
            self.c.set('Settings', 'stopLimit', 0.05)
            self.c.set('Settings', 'stopLimitTimeout', 2.5)
            self.c.set('Settings', 'marginCloseTimeout', 2)
            self.c.write(f)
        printSuccess("Config file generated, please modify the config file and re-run this script")
        quit()
    def validateConfig(self):
        """Range-check all numeric settings; returns False if any fail.
        (Option names are lowercased by ConfigParser, hence the lowercase keys.)"""
        ok = True
        if self.settings["longmargin"] >= 1.0:
            printError("Long margin cannot be >= 1.00")
            ok = False
        if self.settings["shortmargin"] <= 1.0:
            printError("Short margin cannot be <= 1.00")
            ok = False
        if self.settings["profitmargin"] <= 1.0:
            printError("Profit margin cannot be <= 1.0")
            ok = False
        if self.settings["candlestickperiod"] not in [300, 900, 1800, 7200, 14400, 86400]:
            printError("Invalid candle stick period, use 300, 900, 1800, 7200, 14400, or 86400")
            ok = False
        if self.settings["maxbalance"] <= 0.0:
            printError("Max balance cannot be < 0.0")
            ok = False
        if self.settings["maxbalance"] >= 0.95:
            printError("Max balance cannot be >= 0.95")
            ok = False
        if self.settings["stoplimit"] <= 0.01:
            printError("Stop limit cannot be <= 0.01")
            ok = False
        if self.settings["stoplimittimeout"] < 0.5:
            printError("Stop limit timeout cannot be less than 0.5 hours")
            ok = False
        if self.settings["marginclosetimeout"] < 1:
            printError("Margin close timeout cannot be < 1")
            ok = False
        return ok
    def refresh(self):
        """Re-read the config; on validation failure restore the old values
        and exit, otherwise report which keys changed."""
        oldSettings = self.settings
        self.settings = self.parseConfig()
        if not self.validateConfig():
            self.settings = oldSettings
            quit()
        print "\n"
        for key in self.settings:
            if self.settings[key] != oldSettings[key]:
                printHeader(">> %s changed from %s to %s" % (key, oldSettings[key], self.settings[key]))
| true |
04611a08860b50d9e56bdd8c40bf9897ca393801 | Python | Julestevez/Quadrotor-simulator | /Horizontal control of a multidrone system/main.py | UTF-8 | 8,482 | 2.859375 | 3 | [] | no_license | #main file
#This code represents the control of two quadrotors in a horizontal motion in X-Y directions
import numpy as np
import math
import matplotlib.pyplot as plt
import imageio
from skimage.transform import resize
from mpmath import *
from Quadrotor import quadrotor
from Quadrotor import angle_objective
from Quadrotor import EulerIntegration
from Quadrotor import Follower
from Quadrotor import VelAccelDesired
from Visual3D import Visual3D
#***Dynamic parameters of DRONE****
L=0.25 #[m] quadrotor arm length
b = 1e-5 # rotor coefficient; presumably drag/torque constant -- TODO confirm against the Quadrotor module
I = np.diag([5e-3, 5e-3, 10e-3]) #[kgm2] diagonal body inertia
k=3e-5 # rotor coefficient; presumably thrust/lift constant -- TODO confirm
m=0.5 #[kg] quadrotor mass
kd=0.25 # presumably translational drag coefficient -- TODO confirm
g=9.80 #[m/s2] gravitational acceleration
dt=0.1 #[s] integration time step
###############################################################
############## Inicialization of variables ####################
###############################################################
N=450 # number of simulation steps (the animation below renders frames 1..449)
Thrust1, Thrust2 =5, 5 # initial thrust commands for drones 1 and 2
Kxd1, Kxp1 = 0.76, 0.22 # drone 1, X axis: derivative / proportional gains
Kyd1, Kyp1 = 0.76, 0.22 # drone 1, Y axis gains
Kxd2, Kxp2 = 0.76, 0.22 # drone 2, X axis gains
Kyd2, Kyp2 = 0.76, 0.22 # drone 2, Y axis gains
#FINAL COORDINATES TO REACH
final_x_coordinate=90 # leader drone's target X position
final_y_coordinate=90 # leader drone's target Y position
#State space representation: [theta phi gamma
#theta_dot phi_dot gamma_dot
# x y z
# x_dot y_dot z_dot]
States1= [0]*12 #States of the Quadrotor1 (leader); layout documented above
States2= [0]*12 #States of the Quadrotor2 (follower)
States2[6]=-50 #starting position: follower's initial x
States2[7]=-60 # follower's initial y
#Desired states representation: [x_pos_d, x_vel_d, x_accel_d,
# y_pos_d, y_vel_d, y_accel_d,
# theta_d, phi_d, psi_d, height_desired]
S_desired1 = [0]*10 # desired-state vector for drone 1; layout documented above
S_desired2 = [0]*10 # desired-state vector for drone 2
###############################################################
################# end of initialization ######################
###############################################################
#VISUALIZATION
# one shared 3D axes object, cleared and redrawn for every animation frame
fig = plt.figure()
ax = fig.add_subplot(projection='3d') #this line is only useful for 3d projection, not for ploting graphs
# for j in range(N):
def plot_for_offset(n): ## function to create the animation
    """Advance the leader/follower simulation by one step and return the
    rendered 3D frame as an RGB uint8 array (consumed by imageio.mimsave).
    NOTE(review): the gains and thrusts below shadow the module-level values
    and are re-initialized on every call, so the thrust smoothing never
    carries over between frames -- confirm this is intended.
    """
    Thrust1, Thrust2 =5, 5
    Kxd1, Kxp1 = 0.76, 0.22
    Kyd1, Kyp1 = 0.76, 0.22
    Kxd2, Kxp2 = 0.76, 0.22
    Kyd2, Kyp2 = 0.76, 0.22
    Old_States=States1[5:7] #leader pose before this step; NOTE(review): slice 5:7 grabs indices 5 and 6, but positions are 6 and 7 -- confirm against Follower()
    #Calculus of DESIRED vel and accel
    x_vel_desired1,x_accel_desired1 = VelAccelDesired(final_x_coordinate,States1[6])
    y_vel_desired1,y_accel_desired1 = VelAccelDesired(final_y_coordinate,States1[7])
    #Assign desired position, speed and velocity in X axis
    S_desired1[0], S_desired1[1], S_desired1[2] = final_x_coordinate,x_vel_desired1,x_accel_desired1
    #Assign desired position, speed and velocity in Y axis
    S_desired1[3], S_desired1[4], S_desired1[5] = final_y_coordinate,y_vel_desired1,y_accel_desired1
    #OBJECTIVE ANGLES
    #Theta
    S_desired1[6] = angle_objective(S_desired1[0:3],States1[6],States1[9],States1[0],Thrust1,Kxp1,Kxd1)
    #Phi
    S_desired1[7] = angle_objective(S_desired1[3:7],States1[7],States1[10],States1[1],Thrust1,Kyp1,Kyd1)
    #DRONE height position, velocity and acceleration
    #CALCULUS OF THE STATES OF THE QUADROTOR (mutates the global States1 in place)
    States1[0:6], Thrust_calc1, z_accel1, x_accel1, y_accel1= quadrotor(States1,S_desired1[6:11],Thrust1)
    #smoothing of the thrust (rate-limited toward the computed value)
    if Thrust_calc1-Thrust1>0.05:
        Thrust1=Thrust_calc1+0.05
    elif Thrust1-Thrust_calc1>0.05:
        Thrust1=Thrust_calc1-0.05
    #CALCULUS OF POS AND VELOCITY
    States1[9],States1[6]= EulerIntegration(x_accel1,States1[9],States1[6]) #X axis
    States1[10],States1[7]= EulerIntegration(y_accel1,States1[10],States1[7]) #y axis
    ###################################################################
    ###################### Follower DRONE #############################
    ###################################################################
    x_pos_desired2, y_pos_desired2 = Follower(States1[6:8],Old_States) #previous drone states as argument
    #Calculus of DESIRED vel and accel
    x_vel_desired2,x_accel_desired2 = VelAccelDesired(x_pos_desired2,States2[6])
    y_vel_desired2,y_accel_desired2 = VelAccelDesired(y_pos_desired2,States2[7])
    #Assign desired position, speed and velocity in X axis
    S_desired2[0], S_desired2[1], S_desired2[2] = x_pos_desired2,x_vel_desired2,x_accel_desired2
    #Assign desired position, speed and velocity in Y axis
    S_desired2[3], S_desired2[4], S_desired2[5] = y_pos_desired2,y_vel_desired2,y_accel_desired2
    #ANGLE OBJECTIVE
    #Theta - X direction
    S_desired2[6] = angle_objective(S_desired2[0:3],States2[6],States2[9],States2[0],Thrust2,Kxp2,Kxd2)
    #Phi - Y direction
    S_desired2[7] = angle_objective(S_desired2[3:7],States2[7],States2[10],States2[1],Thrust2,Kyp2,Kyd2)
    #DRONE height position, velocity and acceleration
    #CALCULUS OF THE STATES OF THE QUADROTOR (mutates the global States2 in place)
    States2[0:6], Thrust_calc2, z_accel2, x_accel2, y_accel2= quadrotor(States2,S_desired2[6:11],Thrust2)
    #smoothing of the thrust
    if Thrust_calc2-Thrust2>0.05:
        Thrust2=Thrust_calc2+0.05
    elif Thrust2-Thrust_calc2>0.05:
        Thrust2=Thrust_calc2-0.05
    #CALCULUS OF POS and VELOCITY
    States2[9],States2[6]= EulerIntegration(x_accel2,States2[9],States2[6]) #x axis
    States2[10],States2[7]= EulerIntegration(y_accel2,States2[10],States2[7]) #y axis
    #############################################################################
    ########## VISUALIZATION 1: 3d projection gif animation saving ##############
    #############################################################################
    VecStart_x1,VecStart_y1,VecStart_z1, VecEnd_x1,VecEnd_y1,VecEnd_z1= Visual3D(States1[0],States1[1],States1[6],States1[7])
    VecStart_x2,VecStart_y2,VecStart_z2, VecEnd_x2,VecEnd_y2,VecEnd_z2= Visual3D(States2[0],States2[1],States2[6],States2[7])
    ax.cla()
    ax.set_xlim3d(-100, 200)
    ax.set_ylim3d(-50,150)
    ax.set_zlim3d(0,100)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    # draw the two cross-arms of each drone plus a marker at its position
    for i in range(2):
        ax.plot([VecStart_x1[i], VecEnd_x1[i]], [VecStart_y1[i],VecEnd_y1[i]],zs=[VecStart_z1[i],VecEnd_z1[i]])
        ax.plot([VecStart_x2[i], VecEnd_x2[i]], [VecStart_y2[i],VecEnd_y2[i]],zs=[VecStart_z2[i],VecEnd_z2[i]])
    ax.scatter3D(States1[6],States1[7],50,s=30)
    ax.scatter3D(States2[6],States2[7],50,s=30)
    # rasterize the canvas into an (H, W, 3) uint8 RGB array for imageio
    fig.canvas.draw()
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    return img
#*** This is just some code to just visualize the animation, without saving it in a gif.
# ax.scatter3D(States3[6],States3[7],0)
# ax.scatter3D(vectorX1,0,z1)
# plt.pause(0.005)
#########################################################
###### End of VISUALIZATION 1: 3d projection ##########
########################################################
###################################################################
######### VISUALIZATION 2: X-Y displacement of 2 drones ###########
###################################################################
# This 2nd visualization plots some variables of the drones along time. We can choose either the 1st or 2nd visualization of the code.
#plot displacement
""" plt.subplot(2,2,1)
plt.title("Variables of Drone1")
plt.plot(j,States1[6],'og',markersize=2)
plt.ylabel("displacement in X [m]")
plt.subplot(2,2,2)
plt.title("Variables of Drone2")
plt.plot(j,States2[6],'og',markersize=2)
plt.ylabel("displacement in X [m]")
plt.subplot(2,2,3)
plt.plot(j,States1[7],'or',markersize=2)
plt.ylabel("displacement in Y [m]")
plt.subplot(2,2,4)
plt.plot(j,States2[7],'or',markersize=2)
plt.ylabel("displacement in Y [m]") """
###################################################################
####### End of VISUALIZATION 2: X-Y displacement of 2 drones ######
###################################################################
# Render frames 1..449 and write them as an animated GIF.
imageio.mimsave(r'C:\\drone_animation.gif', [plot_for_offset(n) for n in range(1, 450)], duration=10) #how to save the animation in a gif
#plt.show()
| true |
aaadaa7cee19cc863f6e9f8c6ce1347f38a2ddbf | Python | P-ppc/leetcode | /algorithms/MaxAreaOfIsland/solution.py | UTF-8 | 1,460 | 3.15625 | 3 | [] | no_license | class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
max_area = 0
dfs_map = {}
for i in range(0, len(grid)):
for j in range(0, len(grid[0])):
if grid[i][j] == 1 and dfs_map.get(str(i) + 'X' + str(j)) == None:
stack = [{ 'i': i, 'j': j }]
area = 0
while len(stack) > 0:
point = stack.pop()
i = point['i']
j = point['j']
if dfs_map.get(str(i) + 'X' + str(j)) == 1:
continue
else:
area += 1
dfs_map[str(i) + 'X' + str(j)] = 1
if i >= 1 and grid[i - 1][j] == 1:
stack.append({ 'i': i - 1, 'j': j })
if i + 1 < len(grid) and grid[i + 1][j] == 1:
stack.append({ 'i': i + 1, 'j': j })
if j >= 1 and grid[i][j - 1] == 1:
stack.append({ 'i': i, 'j': j - 1 })
if j + 1 < len(grid[0]) and grid[i][j + 1] == 1:
stack.append({ 'i': i, 'j': j + 1 })
max_area = max(max_area, area)
return max_area | true |
1fa4a5b50167e90c0f383e6d38a38ad79f0e71b6 | Python | ipcoo43/hellopython | /nine.py | UTF-8 | 347 | 3.53125 | 4 | [] | no_license | a=int(input('정수 하나 입력 : '))
# NOT gate: print the inverse of the first input.
if a==1:
    print(0)
elif a==0:
    print(1)
b,c=input('정수 두개 입력 : ').split()
b=int(b)
c=int(c)
# NOTE(review): the gates below combine ``a`` and ``b``; ``c`` is read but
# never used afterwards — confirm whether ``b`` and ``c`` were intended.
# AND gate
if a==1 and b==1:
    print(1)
else:
    print(0)
# OR gate
if a==1 or b==1:
    print(1)
else:
    print(0)
# XOR gate
if a!=b:
    print(1)
else:
    print(0)
# XNOR gate
if a==b:
    print(1)
else:
    print(0)
# NOR gate (bug fix: the original line was missing its trailing colon,
# which made the whole script a SyntaxError)
if a==0 and b==0:
    print(1)
else:
    print(0)
6bd0632e256a95e50e1fb29d7af2b1ce141ebf1e | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2860/47774/307043.py | UTF-8 | 357 | 3.15625 | 3 | [] | no_license | def dfs(i):
    # Mark stone i as visited, then recursively visit every unvisited
    # stone sharing its row (x) or column (y) with stone i — i.e. flood
    # the whole connected group that contains stone i.
    v[i]=1
    for j in range(n):
        if v[j]==0 and (x[i]==x[j] or y[i]==y[j]):
            dfs(j)
# Read the stone count, then one "x y" coordinate pair per line.
# The fixed-size arrays are globals shared with dfs() above.
n = int(input())
x = [0] * 1000
y = [0] * 1000
v = [0] * 1000  # visited flags used by dfs()
for idx in range(n):
    x[idx], y[idx] = map(int, input().split(' '))
# Count connected groups: stones connect when they share a row or column.
groups = 0
for idx in range(n):
    if v[idx] == 0:
        dfs(idx)
        groups += 1
# The answer printed is (number of groups - 1).
print(groups - 1)
403adfac9f01e034db6648f2527dd75911391579 | Python | KarlWenzel/MyScikit-Learn | /class-starting-point.py | UTF-8 | 893 | 2.671875 | 3 | [
"MIT"
] | permissive | import numpy as np
np.random.seed(42) # we may or may not need a seed, but it's a good practice for reproducibility
# pandas tutorial - https://pandas.pydata.org/pandas-docs/stable/10min.html
# pandas cheatsheet - http://datacamp-community.s3.amazonaws.com/9f0f2ae1-8bd8-4302-a67b-e17f3059d9e8
import pandas as pd
from pandas.plotting import scatter_matrix
# jupyter notebook supports inline plotting
import matplotlib
import matplotlib.pyplot as plt
# http://scikit-learn.org/stable/modules/classes.html
from sklearn import model_selection, linear_model
from sklearn.metrics import mean_squared_error
# download from https://www.kaggle.com/mirichoi0218/insurance/version/1
# NOTE(review): absolute, machine-specific Windows path — parameterize
# before running anywhere else.
dataFile = "C:\\Users\\zkew18d\\source\\repos\\MyScikit-Learn\\data\\insurance.csv"
# read the data and output some basic descriptive info
rawData = pd.read_csv(dataFile)
# NOTE(review): head()'s return value is discarded; it only displays
# output when run interactively (e.g. in a notebook).
rawData.head()
| true |
f468ceb05d4191e5a45373a9d12bcedc84c7f3e1 | Python | gwib/s1DataAndProcess | /validation_Dataset.py | UTF-8 | 1,540 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 16:30:21 2020
@author: GalinaJonat
"""
# investigating GLIMS
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
# Load the clipped GLIMS glacier-polygon export.
glims = pd.read_csv('/Volumes/ElementsSE/thesisData/validation/glims/glimsPolygons_clipped.csv')
print(glims.columns)
print('Number of unique glacier entries in DB: '+ str(len(set(glims.glac_id))))
# NOTE(review): the bare set(...) expressions below are no-ops when run as
# a script; the Out[...] comments record their values from an interactive
# session.
set(glims.anlys_time) # publication date
#Out[12]: {'2014-12-01T00:00:00', '2018-08-01T00:00:00'}
set(glims.src_date) # image date
#Out[13]: {'2001-08-29T00:00:00', '2016-08-30T00:00:00'}
set(glims.release_dt)
#Out[14]: {'2014-12-01T11:00:00', '2018-08-22T09:00:00'}
# Drop bookkeeping columns and derive the acquisition year from src_date.
glims_short = glims.drop(['local_id', 'glac_stat', 'subm_id', 'rc_id'], axis=1)
#date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%dT%H:%M:%S.%f')
glims_short['src_date'] = glims_short['src_date'].apply(lambda d: dt.datetime.strptime(d, '%Y-%m-%dT%H:%M:%S'))
glims_short['img_year'] = glims_short['src_date'].apply(lambda x: x.year)
glims_late = glims_short[glims_short.img_year > 2001]
glimsSub = pd.read_csv('/Volumes/ElementsSE/thesisData/validation/glims/glims_subset.csv')
glimsSub_short = glimsSub.drop(['local_id', 'glac_stat', 'subm_id', 'rc_id'], axis=1)
# Histogram of glacier areas.
figGlimsHist, axGlimsHist = plt.subplots(dpi=150)
axGlimsHist.hist(glims.area, bins=10, color='#0098DB')
axGlimsHist.set_ylabel('Glacier Count') #note:changed axis names, just need to rerun
axGlimsHist.set_xlabel(r'Glacier Area in $m^2$')
plt.show() | true |
ffce21504ed39e139559c986162ecc4eca42c683 | Python | bipsen/noun_finder | /nlp_tool.py | UTF-8 | 2,011 | 3.234375 | 3 | [] | no_license | """
This script assumes the text column is called "text".
"""
import pandas as pd
import stanfordnlp
import os
import string
import nltk
from tqdm import tqdm
from nltk.corpus import stopwords
# Fetch the NLTK stop-word corpora (downloads on first run only).
nltk.download('stopwords')
"""
Choose which types of words (eg. nouns, verbs) are desired.
For POS tags, see https://universaldependencies.org/u/pos/
"""
# Universal POS tags to keep when filtering lemmas (used by filter_pos).
wanted_pos = ['NOUN']
def clean_text(text):
    """
    Return *text* with every ASCII punctuation character removed.
    """
    punctuation_table = str.maketrans('', '', string.punctuation)
    return text.translate(punctuation_table)
def get_lemma(token):
    """
    Collect the lemma of every word, across all sentences of the parsed
    document *token*, in document order.
    """
    lemmas = []
    for sent in token.sentences:
        for word in sent.words:
            lemmas.append(word.lemma)
    return lemmas
def remove_stop(words):
    """
    Drop every entry of *words* that appears in the module-level
    ``stop_words`` list.
    """
    kept = []
    for word in words:
        if word not in stop_words:
            kept.append(word)
    return kept
def filter_pos(token):
    """
    Return the unique lemmas of *token* whose universal POS tag appears in
    the module-level ``wanted_pos`` list.

    Fix: the original deduplicated with ``list(set(...))``, which yields an
    arbitrary, hash-randomized order across runs; ``dict.fromkeys`` removes
    duplicates while keeping first-seen (document) order, so the output is
    deterministic.
    """
    filtered = []
    for sent in token.sentences:
        filtered.extend([word.lemma for word in sent.words
                         if word.upos in wanted_pos])
    return list(dict.fromkeys(filtered))
# Download dansk model for nlp.
if not os.path.exists(os.path.join(os.environ['HOME'],
                                   'stanfordnlp_resources', 'da_ddt_models')):
    stanfordnlp.download('da')
# Set up nlp pipeline
nlp = stanfordnlp.Pipeline(processors='tokenize,mwt,lemma,pos', lang='da')
# Read data. Change to correspond.
df = pd.read_csv('data.csv')
# For progress bar
tqdm.pandas()
# Get get stop words
stop_words = stopwords.words('danish')
# Parse every document; this runs the full pipeline once per row (slow).
df['tokens'] = df['text'].progress_apply(lambda text: nlp(text))
df['lemmas'] = df['tokens'].apply(get_lemma)
df['lemmas_string'] = df['lemmas'].apply(lambda x: " ".join(x))
df['without_stop'] = df['lemmas'].apply(remove_stop)
# Keep only lemmas whose POS tag is listed in wanted_pos, joined as text.
df['filtered'] = df['tokens'].apply(filter_pos)
df['filtered'] = df['filtered'].apply(lambda x: ", ".join(x))
# Drop the intermediate columns before writing the result.
df.drop(['tokens', 'lemmas', 'lemmas_string', 'without_stop'],
        axis=1, inplace=True)
df.to_csv('output.csv')
| true |
ea818f0f1ab6c4db9094829e98b08f9974a506df | Python | AndrewCarracher/Exercism | /python/isogram.py | UTF-8 | 510 | 3.453125 | 3 | [] | no_license | def is_isogram(string):
no_char_match = True
char_string = split_string(string)
counter=1
count=1
for char in char_string:
if char.isalpha():
while count < len(char_string):
if char.lower() == char_string[count].lower():
no_char_match = False
count = count + 1
counter = counter + 1
count = counter
return no_char_match
def split_string(word):
    """Return the characters of *word* as a list."""
    return list(word)
| true |
95013101e44bbf892c65b19cb657b914d7038d20 | Python | NBlanchar/exercism | /series/series.py | UTF-8 | 331 | 3.25 | 3 | [] | no_license | def slices(cadena, longitud):
if(len(cadena) >= longitud and longitud >= 1):
resultado = []
for x in range(len(cadena)):
serie = cadena[x:x+longitud]
if(len(serie) == longitud):
resultado.append(serie)
return resultado
else:
raise ValueError('error')
| true |
95a603dd2f2f68b48b4b199038c9798a004e4e20 | Python | rtstock/EfficientFrontierFromLocalRepository | /py/test_randomnumbers2.py | UTF-8 | 1,008 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 05 15:06:10 2015
@author: justin.malinchak
"""
# NOTE(review): Python 2 print statement — this file is Python 2 only.
print '--------------------'
import pandas as pd
import numpy as np, numpy.random
SymbolsList = ['WMT','NKE','T','MCD','JPM','^RUT','XOM','MSFT','YHOO','QQQ','HD','GS','BAC','LEO']
import random
# Parameters used by generate() below.
# NOTE(review): ``min`` and ``max`` shadow the builtins of the same name.
min = 0.0
max = 1.0
A = 500.0
B = 100.0
def generate(n):
    # Build an n-row table [Y[i]*C[i], Y[i]] of random weights, where the
    # first and last Y entries are solved (not random) so that the totals
    # match the module-level targets: sum(Y) == B and sum(Y*C) == A.
    # NOTE(review): relies on the module globals min, max, A, B.
    C = [min + i*(max-min)/(n+1) for i in range(1, n+1)]
    Y = [0]
    for i in range(1,n-1):
        # This line should be changed in order to always get positive numbers
        # It should be relatively easy to figure out some good random generator
        Y.append(random.random())
    # Solve the last element so the C-weighted sum equals A.
    val = A - C[0]*B
    for i in range(1, n-1):
        val -= Y[i] * (C[i] - C[0])
    val /= (C[n-1] - C[0])
    Y.append(val)
    # Solve the first element so the plain sum equals B.
    val = B
    for i in range(1, n):
        val -= Y[i]
    Y[0] = val
    result = []
    for i in range(0, n):
        result.append([ Y[i]*C[i], Y[i] ])
    return result
print generate(10) | true |
8e37b861a829e8951aa5f001e43ea6d783d54b62 | Python | mkhan45/crypto | /rsa/isprimepy | UTF-8 | 367 | 3.34375 | 3 | [] | no_license | #!/bin/python
import sys
import math
def isprime_opt(n: int, primes=None):
    """Return True when ``n`` is a prime number, False otherwise.

    ``primes`` is kept only for backward compatibility with the original
    signature; it is no longer consulted for correctness.

    Fixes over the original implementation:
    - the mutable default list ``[2, 3, 5]`` was shared across calls and
      polluted with composites (``primes.append(n)`` on the failure path);
    - even numbers greater than 2 were reported as prime (the divisor 2
      was never tried, e.g. isprime_opt(4) was True);
    - n < 2 (0 and 1) was reported as prime.
    """
    if primes is None:
        primes = [2, 3, 5]
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # Trial division by every odd candidate up to sqrt(n).
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        if n % i == 0:
            return False
    return True
# CLI entry point: report primality of the integer given as argv[1].
print(isprime_opt(int(sys.argv[1])))
| true |
82bb492cc789a80951ef374de90b1fc52df7d905 | Python | sekhar1926/CSEE_5590_PYTHON_ICP | /team_8_project/Source/cnn.py | UTF-8 | 2,303 | 2.734375 | 3 | [] | no_license |
'''
!tar -xvf crowdai_train_2.tar
!tar -xvf crowdai_test.tar'''
import numpy as np
import keras
from keras.preprocessing import image
from keras.layers import MaxPooling2D,Convolution2D,Dropout, Flatten,Dense,Activation
from keras.models import Sequential,save_model
from keras.utils import np_utils
import os
import cv2
from sklearn.utils import shuffle
# Hard-coded local dataset root; one sub-folder per disease class.
path = '/Users/adisekharrajuuppalapati/Downloads/crowdai'
leaf = os.listdir(path)
print(len(leaf),type(leaf))
print(leaf)
print(leaf[0][2:4])
# Load every image as a 28x28 RGB array scaled to [0, 1].  The class label
# is parsed from characters 2:4 of the folder name — assumes a fixed
# naming scheme with the class id at that position (TODO confirm).
x,y = [], []
for i in leaf:
    images = os.listdir(path+"/"+i)
    for j in images:
        img_path = path+"/"+i+"/"+j
        #Better method then cv2.imread
        img = image.load_img(img_path, target_size=(28,28))
        img = image.img_to_array(img)
        #print(img.shape)
        #img =img.flatten()
        #img = img.reshape(1,784)
        # print(img.shape)
        # img = img.reshape((28,28))
        img = img/255.0
        x.append(img)
        y.append(int(i[2:4]))
    print(images[0])
print(len(y))
print(len(x))
# Stack into arrays and one-hot encode the labels.
x_data = np.array(x)
y_data = np.array(y)
print(x_data.shape)
print(y_data.shape)
y_data = np_utils.to_categorical(y_data)
print(y_data.shape)
num_classes = y_data.shape[1]
print(num_classes)
# Shuffle reproducibly, then 60/40 train/test split.
x_data , y_data = shuffle(x_data,y_data, random_state = 0)
split = int(0.6*(x_data.shape[0]))
x_train = x_data[:split]
x_test = x_data[split:]
y_train = y_data[:split]
y_test = y_data[split:]
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
# Small sequential CNN: three conv layers, one max-pool, softmax head.
model = Sequential()
model.add(Convolution2D(32,3,3,input_shape = (28,28,3)))
model.add(Activation('relu'))
model.add(Convolution2D(64,3,3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size= (2,2)))
model.add(Convolution2D(16,3,3))
model.add(Activation('relu'))
model.add( Flatten() )
model.add( Dropout(0.2) )
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.summary()
model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
# Train for 20 epochs with TensorBoard logging.
tbcallback=keras.callbacks.TensorBoard(log_dir='./Graph1',histogram_freq=10,write_graph=True,write_images=True)
model.fit(x_train,y_train, validation_data=(x_test,y_test),epochs=20,batch_size = 128,callbacks=[tbcallback], shuffle = True )
model.save('Disease_detector_model.h5') | true |
64b3868f614f5101c040f7cfa8bd46270c3618aa | Python | lixiang2017/leetcode | /adventofcode/2021/day6/part1_2/lanternfish.py | UTF-8 | 842 | 3.015625 | 3 | [] | no_license |
from collections import Counter
def get_cnt(file_name, day):
    """Simulate lanternfish spawning (Advent of Code 2021, day 6).

    Reads comma-separated spawn timers from ``file_name``, advances the
    population ``day`` days, prints and returns the total fish count.
    Timers count down each day; a fish at 0 resets to 6 and spawns a new
    fish with timer 8.  Counting fish per timer value keeps this O(day).

    Fix: the original crashed with ValueError on a blank trailing line
    (``int('')``); blank lines are now skipped.  As before, if the file
    holds several data lines only the last one is used.
    """
    dp = Counter()
    with open(file_name) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # robustness: ignore blank/trailing lines
            dp = Counter(int(t) for t in line.split(','))
    for _ in range(day):
        next_dp = Counter()
        for timer in range(1, 9):
            if timer in dp:
                next_dp[timer - 1] = dp[timer]
        if 0 in dp:
            next_dp[6] += dp[0]   # parents reset to 6
            next_dp[8] += dp[0]   # newborns start at 8
        dp = next_dp
    cnt = sum(dp.values())
    print('day: ',day, 'cnt: ', cnt)
    return cnt
# Run on the example input ('input1') and the real puzzle input ('input');
# the expected outputs are recorded in the docstring below.
c1 = get_cnt('input1', 18)
c2 = get_cnt('input1', 80)
c3 = get_cnt('input1', 256)
c = get_cnt('input', 80)
c = get_cnt('input', 256)
'''
day: 18 cnt: 26
day: 80 cnt: 5934
day: 256 cnt: 26984457539
day: 80 cnt: 388739
day: 256 cnt: 1741362314973
''' | true |
c8fcf75dbf5a148d7f1eab116e27b8727455e3b9 | Python | Tom-Lotze/kijkcijferbot | /scrape_kijkcijfers.py | UTF-8 | 585 | 3.0625 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup as BS
def get_top(url="https://kijkonderzoek.nl/"):
    """Scrape the ratings page and pair each show title with its average
    viewer count, returned as a list of (title, viewers) tuples."""
    # fetch and parse the page
    page = requests.get(url)
    soup = BS(page.text, "html.parser")
    # grab the title cells and the rating cells (two rating cells per show)
    titles = soup.find_all("td", class_="kc_cdtitle", limit=25)
    viewings = soup.find_all("td", class_="kc_cdrt0", limit=50)
    # keep only the first (average) rating of every pair
    averages = [viewings[index] for index in range(0, 50, 2)]
    # pair every title with its average rating
    return [(title.div.string, rating.string)
            for title, rating in zip(titles, averages)]
| true |
e7e342c1c96d26f36a549c3a98a5635d563b91d8 | Python | lanl/BEE | /beeflow/common/container_path.py | UTF-8 | 947 | 3.296875 | 3 | [
"BSD-2-Clause"
] | permissive | """Path conversion code."""
import os
class PathError(Exception):
    """Raised for invalid (e.g. non-absolute) bind-mount or workdir paths."""

    def __init__(self, *args):
        """Store the positional arguments on the exception."""
        super().__init__(*args)
def _components(path):
"""Convert a path into a list of components."""
if not os.path.isabs(path):
raise PathError('Bind mounts and workdir paths must be absolute')
path = os.path.normpath(path)
return [comp for comp in path.split('/') if comp]
def convert_path(path, bind_mounts):
    """Translate a host path into the equivalent in-container path.

    ``bind_mounts`` maps host mount points to container mount points.  The
    first mount whose host prefix matches *path* wins; when none match, the
    original path is returned unchanged.
    """
    parts = _components(path)
    for host_root, container_root in bind_mounts.items():
        host_parts = _components(host_root)
        if parts[:len(host_parts)] != host_parts:
            continue
        container_parts = _components(container_root) + parts[len(host_parts):]
        return '/' + '/'.join(container_parts)
    return path
| true |
307656d57298abe8eaae66effa577a0a3d0e3a4e | Python | Taewan-P/attendance-check-nfc | /nfctoid.py | UTF-8 | 234 | 2.828125 | 3 | [] | no_license | from random import randint
def idtest():
    """Return a fixed tag id used for testing."""
    dummy_tag = "AD:CG:3F"
    return dummy_tag
def scan_id():
    """Simulate an NFC scan by returning one of five known tag ids at random."""
    known_tags = ['AD:CG:3F:4B', 'EF:5F:95:60', '3D:51:B9:9A', '25:DB:C0:A4', '4D:D0:56:7D']
    picked = randint(0, len(known_tags) - 1)
    return known_tags[picked]
67e67d6f06c2c2740405cd8ae2e1f034ff1456f4 | Python | nibolyoung/leetcode | /.leetcode/1656.设计有序流.py | UTF-8 | 592 | 3.390625 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=1656 lang=python3
#
# [1656] 设计有序流
#
# @lc code=start
class OrderedStream:
    """LeetCode 1656: stream (id, value) pairs and, after each insertion,
    return the longest run of consecutive values starting at the current
    pointer."""

    def __init__(self, n: int):
        """Create a stream expecting ids 1..n."""
        self.id = 1   # next id to emit
        self.mp = {}  # id -> value, for values received out of order
        self.cnt = n  # total number of expected insertions (kept for compat)

    def insert(self, id: int, value: str) -> List[str]:
        """Store (id, value) and return the chunk of consecutive values
        starting at the current pointer (possibly empty)."""
        self.mp[id] = value
        chunk = []
        # Drain every consecutively-numbered value now available.
        # (idiom fix: ``x in d`` instead of ``x in d.keys()``)
        while self.id in self.mp:
            chunk.append(self.mp[self.id])
            self.id += 1
        return chunk
# Your OrderedStream object will be instantiated and called as such:
# obj = OrderedStream(n)
# param_1 = obj.insert(id,value)
# @lc code=end
| true |
5874019a4d65a1833078b0b05fb28bcb3e01c8e8 | Python | IvanBodnar/geocoder_rest | /geocoder/tests/tests_helpers.py | UTF-8 | 4,392 | 2.890625 | 3 | [] | no_license | from django.test import TestCase
from django.db import connection
from geocoder.helpers import Calle, get_calles, interseccion, altura_calle, tramo
from geocoder.models import CallesGeocod
from geocoder.exceptions import CalleNoExiste, InterseccionNoExiste
from .database_definitions import *
def preparar_datos():
    """
    Loads into the database created for testing the geocoder SQL
    functions plus two intersecting street segments used as fixtures.

    The SQL/fixture definitions come from database_definitions.py
    (star-imported above).  cursor.execute() is used because
    CallesGeocod.objects.raw() does not work.
    """
    with connection.cursor() as cursor:
        # Install the geocoder SQL functions.
        cursor.execute(union_geom)
        cursor.execute(union_geom_v2)
        cursor.execute(existe_calle)
        cursor.execute(altura_total_calle)
        cursor.execute(existe_altura)
        cursor.execute(punto_interseccion)
        cursor.execute(altura_direccion_calle)
        # Insert two street segments that intersect each other.
        CallesGeocod.objects.create(**cabildo_2000)
        CallesGeocod.objects.create(**cabildo_2100)
        CallesGeocod.objects.create(**juramento_2350)
        CallesGeocod.objects.create(**juramento_2400)
class GeocoderCalleTestCase(TestCase):
    """
    Tests the Calle class, which wraps the database functions
    that make up the geocoder.
    """
    def setUp(self):
        preparar_datos()
    def test_crea_calle(self):
        """
        Checks that the street object is created.
        """
        calle = Calle('cabildo')
        self.assertEqual(calle.nombre, 'cabildo')
    def test_devuelve_punto_interseccion(self):
        """
        Checks that adding two Calle instances returns
        the correct intersection point.
        """
        calle1 = Calle('cabildo')
        calle2 = Calle('juramento')
        resultado = calle1 + calle2
        self.assertEqual('POINT(-58.4566933131458 -34.5620356414316)', resultado)
    def test_devuelve_punto_altura(self):
        """
        Checks that Calle.ubicar_altura() returns the
        correct point.
        """
        calle = Calle('cabildo')
        resultado = calle.ubicar_altura(2002)
        self.assertEqual('POINT(-58.4558887506853 -34.5629698844945)', resultado)
    def test_raises_callenoexiste(self):
        """
        Checks that a non-existent street name raises
        the CalleNoExiste exception.
        """
        # Fix: the original follow-up
        # ``self.assertTrue('la calle ...', context.exception)`` was a no-op
        # (a non-empty string is always truthy, and the second argument is
        # just the failure message), so it could never fail.  assertRaises
        # already verifies that the exception is raised.
        with self.assertRaises(CalleNoExiste):
            Calle('aaabbbb')
    def test_raises_interseccionnoexiste(self):
        """
        Checks that adding two streets that do not intersect
        raises the InterseccionNoExiste exception.
        """
        # Same vacuous assertTrue removed here (see test_raises_callenoexiste).
        with self.assertRaises(InterseccionNoExiste):
            calle1 = Calle('juramento')
            calle2 = Calle('juramento')
            calle1 + calle2
class GeocoderFuncionesHelpersTestCase(TestCase):
    """
    Checks that the wrapper helper functions return
    the expected dictionaries.
    """
    def setUp(self):
        preparar_datos()
    def test_get_calles(self):
        """All loaded street names are listed."""
        self.assertEqual(['cabildo', 'juramento'], get_calles())
    def test_interseccion(self):
        """interseccion() describes the crossing of two streets."""
        esperado = {
            "interseccion": "cabildo y juramento",
            "coordenadas": "POINT(-58.4566933131458 -34.5620356414316)",
        }
        self.assertEqual(esperado, interseccion('cabildo', 'juramento'))
    def test_altura_calle(self):
        """altura_calle() locates a street number on a street."""
        esperado = {
            'direccion': 'cabildo 2115',
            'coordenadas': 'POINT(-58.456815768952 -34.5618934492396)',
        }
        self.assertEqual(esperado, altura_calle('cabildo', 2115))
    def test_tramo(self):
        """tramo() describes the segment between two street numbers."""
        esperado = {
            'tramo': 'cabildo entre 2000 y 2199',
            'coordenadas': ('MULTILINESTRING((-6507277.94176834 -4104650.53997124,'
                            '-6507369.33307795 -4104521.67427025))'),
        }
        self.assertEqual(esperado, tramo('cabildo', 2000, 2199))
| true |
13ab7f607cbeeb23c41306dfaa033132c751fe17 | Python | itbonds/COP3502-Moss | /organize_submissions.py | UTF-8 | 4,124 | 2.90625 | 3 | [] | no_license | from pathlib import Path
import shutil
import re
import config as cfg
# Zybooks export filenames: First[_Middle[_Middle]]_Last_email_date_time[.ext]
zybooks_submission_regex = "^(?P<first_name>[^_]+)_((?P<middle_name>[^_]+)?\_){0,2}(?P<last_name>[^_]+)_(?P<email>[^_]+)_(?P<date>[^_]+)_(?P<time>[^_.]+)\.?(?P<extension>[\w\d]+)?$"
# Canvas export filenames: studentname[_LATE]_userid_submissionid_filename[.ext]
# Note: the extension group is optional in both patterns and may be None.
canvas_submission_regex = "^(?P<student_name>[^_]+)\_((?P<late>LATE)\_)?(?P<user_id>[^_]+)_(?P<submission_item_id>[^_]+)_(?P<file_name>[^.]+)\.?(?P<extension>[\w\d]+)?$"
def extract_submissions(config):
    """Unpack the downloaded submission archive into the working folder."""
    archive = config.submission_archive
    destination = config.submission_folder
    shutil.unpack_archive(archive, destination)
def organize_submissions_zybooks(config):
    """Group Zybooks submission files into one folder per student.

    Zip submissions are extracted into ``First_Last/``; every matched
    source file is removed afterwards.
    """
    root = Path(config.submission_folder)
    pattern = re.compile(zybooks_submission_regex)
    for entry in root.iterdir():
        if entry.is_dir():
            continue  # ignore any directories, in case they exist
        match = pattern.match(entry.name)
        if match is None:
            continue  # not a Zybooks export name
        extension = match.group('extension') or ""
        # make the student's directory if it doesn't exist
        student_dir = root.joinpath(
            match.group('first_name') + "_" + match.group('last_name'))
        student_dir.mkdir(exist_ok=True)
        if extension.lower() == 'zip':
            shutil.unpack_archive(entry, student_dir, 'zip')
        # NOTE(review): every matched file is deleted here, even non-zip
        # submissions that were never copied anywhere — confirm intended.
        entry.unlink()
def organize_submissions_canvas(config):
    """
    Organizes the downloaded submissions from Canvas.

    Files matching Canvas's export naming convention are sorted into one
    folder per student: zip archives are unpacked (keeping only .java
    files), plain .java files are moved as-is, everything else is discarded.
    """
    submission_folder = Path(config.submission_folder)
    regex = re.compile(canvas_submission_regex)
    files = []
    files.extend(submission_folder.iterdir())
    for filepath in files:
        # canvas files are usually zips or the files themselves .pdf etc
        if filepath.is_dir():
            continue
        result = regex.match(filepath.name)
        if result is None:
            continue
        student_name = result.group('student_name')
        is_late = result.group('late')  # captured but currently unused
        file_name = result.group('file_name')
        extension = result.group('extension')
        student_folder = submission_folder.joinpath(student_name)
        student_folder.mkdir(exist_ok=True)
        print('file: {}, student_name: {}, file_name: {}, ext: {}'.format(filepath, student_name, file_name, extension))
        # Bug fix: the regex makes the extension optional, so the group can
        # be None; the original called extension.lower() unconditionally and
        # crashed with AttributeError on extension-less filenames.  Treating
        # it as '' makes such files fall through to the "discard" branch.
        if extension is None:
            extension = ''
        # Three possible extension we want to conside zip, java, other
        # zip: we want to unarchive the folder and take the java files into the student's folder
        # java: place into the student's folder
        # other: discard
        if extension.lower() == 'zip':
            organize_canvas_zip(filepath, student_folder)
        elif extension.lower() == 'java':
            dest = student_folder.joinpath(file_name + '.java')
            print('dest: ' + str(dest))
            shutil.move(filepath, dest)
        if filepath.exists():
            filepath.unlink()
def organize_canvas_zip(filepath, student_folder):
    """Unpack a Canvas zip submission and keep only its .java files.

    Every .java file found anywhere in the archive is moved to the top of
    ``student_folder``; all other extracted files and the (then empty)
    directories are deleted.
    """
    shutil.unpack_archive(filepath, student_folder, 'zip')
    pending = list(student_folder.iterdir())   # breadth-first worklist
    dirs_to_remove = []                        # most recently found first
    for node in pending:
        if node.is_dir():
            # Queue the directory's children and remember it for cleanup.
            pending.extend(node.iterdir())
            dirs_to_remove.insert(0, node)
        elif node.suffix == '.java':
            # Java source: hoist it to the student's top-level folder.
            shutil.move(node, student_folder.joinpath(node.name))
        else:
            # Not source code, so remove it.
            node.unlink()
    for node in dirs_to_remove:
        if node.is_dir():
            node.rmdir()
        else:
            node.unlink()
if __name__ == "__main__":
    # Script entry point: unpack the archive, then dispatch on site type.
    run_config = cfg.Config()
    extract_submissions(run_config)
    site = run_config.site_type
    if site == cfg.SiteType.CANVAS:
        organize_submissions_canvas(run_config)
    elif site == cfg.SiteType.ZYBOOKS:
        organize_submissions_zybooks(run_config)
| true |
457eeaa540be31b211c2214d607b0c0114d4a3d1 | Python | bluestar31/dinhthanhlong-fundamental-c4e13 | /Session 4/test.py | UTF-8 | 607 | 3.5 | 4 | [] | no_license | # p = ['Tuan anh', 22, 3, 'Moc Chau', 2]
# print(p)
# person = {}
# print(person)
# person = {
# 'name': 'Tuan Anh'
# }
#
# print(person)
# person = {
# 'name': 'Tuan Anh',
# 'age': 22,
# 'home': 'Moc Chau',
# }
#
# print(person['home'])
# person['home'] = 'Ha Noi'
# print(person)
#
# person['project_count'] = 2
# print(person)
string = "Heello I'm Long"
letter_counts = {}
# Count characters case-insensitively (spaces removed first).
# Bug fix: the original stored under ``letter.lower()`` but looked up
# ``letter``, so an upper-case letter restarted the count of its
# lower-case twin (the 'L' in "Long" clobbered the two 'l's of "Heello").
# Both the key and the lookup must be lowered.
for letter in string.replace(" ", ""):
    letter_counts[letter.lower()] = letter_counts.get(letter.lower(), 0) + 1
print(letter_counts)
# for letter in string.replace(" ", ""):
#     print(letter_counts.get(letter, 0) + 1 )
| true |
ca95756db2bc5b7c3a87d7d7ed1b96c04ec5e6f8 | Python | Jhonnis007/Python | /ex090.py | UTF-8 | 451 | 4.03125 | 4 | [] | no_license | '''
Faça um programa que leia nome e média de uma aluno, guardando também a situação em um diciário. No final, mostre
o conteúdo da entrutura
=> 7 aprovado menor Reprovado
'''
# Read the student's record from the console.
aluno = {'Nome': str(input('Nome do Aluno: ')),
         'Media': float(input('Média: '))}
# Pass mark is 7.0.
aluno['Situação'] = 'APROVADO' if aluno['Media'] >= 7 else 'REPROVADO'
# Show every field of the record.
for chave, valor in aluno.items():
    print(f'{chave} é igual a {valor}')
| true |
befcff29bbb08b51e3ca9b0d8a0c810a103d4383 | Python | likcu/networking | /Assignment3/ss.py | UTF-8 | 2,747 | 2.703125 | 3 | [] | no_license | import udt
import config
import util
import struct
import helper
from datetime import datetime
# Stop-And-Wait reliable transport protocol.
class StopAndWait:
    """Stop-and-Wait reliable transport on top of the udt network layer.

    Sender side: send() transmits one DATA packet at a time and refuses new
    data until the matching ACK arrives (seqnum/lastSeqnum bookkeeping),
    retransmitting on a periodic timer.  Receiver side: handle_arrival_msg()
    delivers in-order DATA to the application handler and ACKs it.
    """
    # "msg_handler" is used to deliver messages to application layer
    # when it's ready.
    def __init__(self, local_ip, local_port,
                 remote_ip, remote_port, msg_handler):
        self.network_layer = udt.NetworkLayer(local_ip, local_port,
                                              remote_ip, remote_port, self)
        self.msg_handler = msg_handler
        self.type = config.MSG_TYPE_DATA
        self.seqnum = 0          # next sequence number to send
        self.lastSeqnum = -1     # highest sequence number ACKed so far
        self.oldpkt = b''        # last DATA packet sent, kept for retransmission
        # Retransmit the outstanding packet every 0.5 s until its ACK arrives.
        self.timer = util.PeriodicClosure(self.time_out,0.5)
        self.expectedSeqnum = 0  # receiver side: next in-order DATA expected
        self.starttime = datetime.utcnow()

    def time_out(self):
        # Retransmission callback fired by the periodic timer.
        self.network_layer.send(self.oldpkt)

    # "send" is called by application. Return true on success, false
    # otherwise.
    def send(self, msg):
        # Only one packet may be in flight: accept new data only when the
        # previous packet has been ACKed (seqnum == lastSeqnum + 1).
        if self.seqnum == self.lastSeqnum+1:
            sndpkt = helper.make_pkt(self.type,self.seqnum,msg)
            self.oldpkt = sndpkt
            self.network_layer.send(sndpkt)
            self.seqnum += 1
            self.timer.start()
            return True
        else:
            return False

    # "handler" to be called by network layer when packet is ready.
    def handle_arrival_msg(self):
        msg = self.network_layer.recv()
        # Packet layout (big-endian shorts): [0:2] type, [2:4] seqnum,
        # [4:6] checksum, [6:] payload.
        typ = struct.unpack('!h',msg[0:2])[0]
        seqnum = struct.unpack('!h',msg[2:4])[0]
        #if receive ACK
        if typ == config.MSG_TYPE_ACK:
            # Recompute the checksum; accept only the ACK for the packet
            # currently in flight, then stop the retransmission timer.
            checksum = struct.pack('!h',helper.checksum(typ,seqnum,b''))
            if msg[4:6] == checksum and self.lastSeqnum == seqnum - 1:
                self.timer.stop()
                self.lastSeqnum += 1
        #if receive Data
        elif typ == config.MSG_TYPE_DATA:
            extract_data = msg[6:len(msg)]
            checksum = struct.pack('!h',helper.checksum(typ,seqnum,extract_data))
            if msg[4:6] == checksum and self.expectedSeqnum == seqnum:
                # In-order, uncorrupted data: deliver, log latency, ACK it.
                self.msg_handler(extract_data)
                endtime = datetime.utcnow()
                diftime = endtime - self.starttime
                print (str(diftime.seconds) + " seconds")
                sndpkt = helper.make_pkt(config.MSG_TYPE_ACK,seqnum,b'')
                self.network_layer.send(sndpkt)
                self.expectedSeqnum += 1
            else:
                # Corrupted or out-of-order: re-ACK the last in-order seqnum.
                sndpkt = helper.make_pkt(config.MSG_TYPE_ACK,self.expectedSeqnum-1,b'')
                self.network_layer.send(sndpkt)

    # Cleanup resources.
    def shutdown(self):
        # TODO: cleanup anything else you may have when implementing this
        # class.
        self.network_layer.shutdown()
| true |
79b60ff9834c56d8be1734bc44bf25eb293691cf | Python | goocy/upscale-detector | /upscaleDetector.py | UTF-8 | 4,444 | 2.625 | 3 | [] | no_license | # This code was written by goocy and is licensed under Creative Commons BY-NC 3.0.
# https://creativecommons.org/licenses/by-nc/3.0/
# Commercial use is prohibited.
# core idea: https://github.com/0x09/resdet
# other helpful sources:
# https://techtutorialsx.com/2018/06/02/python-opencv-converting-an-image-to-gray-scale/
# https://stackoverflow.com/questions/13904851/use-pythons-scipy-dct-ii-to-do-2d-or-nd-dct
# sliding median: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.median_filter.html
# hypothesis testing: https://stackoverflow.com/questions/48705448/z-score-calculation-from-mean-and-st-dev
# ffmpeg: https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md
# ffmpeg syntax: https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#convert-sound-to-raw-pcm-audio
# unused sources:
# https://pywavelets.readthedocs.io/en/latest/ref/2d-dwt-and-idwt.html
# https://pywavelets.readthedocs.io/en/latest/ref/2d-decompositions-overview.html
import matplotlib.pyplot as plt
import skimage.transform
import scipy.fftpack
import scipy.ndimage
import scipy.stats
import numpy as np
import argparse
import random
import ffmpeg # the package is called ffmpeg-python
import tqdm
import cv2
# https://stackoverflow.com/questions/38332642/plot-the-2d-fft-of-an-image
def dctn(x, norm="ortho"):
    """N-dimensional DCT-II: apply a 1-D DCT successively along every axis."""
    result = x
    for axis in range(result.ndim):
        result = scipy.fftpack.dct(result, axis=axis, norm=norm)
    return result
# Parse the single positional argument: path to the video file to analyze.
parser = argparse.ArgumentParser()
parser.add_argument('f')
args = parser.parse_args()
videoFilename = args.f
sampleCount = 200

# figure out metadata
probe = ffmpeg.probe(videoFilename)
video_info = next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')
width = int(video_info['width'])
height = int(video_info['height'])
duration = int(float(probe['format']['duration']) * 1000)
print('Original resolution: {:d}x{:d}'.format(width, height))
videoShape = [width, height]
videoRatio = width / height

# extract random frames
timestamps = random.choices(range(duration-100), k=sampleCount)
xCurves = [0,]*sampleCount
yCurves = [0,]*sampleCount
print('Sampling random frames from the video...')
for i, timestamp in enumerate(tqdm.tqdm(timestamps)):
    imageBuffer, _ = (
        ffmpeg.input(videoFilename, ss='{:.0f}'.format(timestamp/1000))
        .output('pipe:', vframes=1, format='rawvideo', pix_fmt='rgb24', loglevel="quiet")
        .run(capture_stdout=True)
    )
    if len(imageBuffer) > 0:
        image = np.frombuffer(imageBuffer, np.uint8).reshape([height, width, 3])
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Per-frame energy spectrum: squared 2-D DCT summed per column (x)
        # and per row (y), then log-scaled.
        dct = dctn(gray)
        squaredDCT = np.power(dct,2)
        xDCT = np.sum(squaredDCT, axis=0)
        yDCT = np.sum(squaredDCT, axis=1)
        # Avoid log10(0) by replacing exact zeros with a small positive value.
        xDCT[xDCT == 0] = np.min(xDCT) / 10
        yDCT[yDCT == 0] = np.min(yDCT) / 10
        xCurves[i] = np.log10(xDCT)
        yCurves[i] = np.log10(yDCT)

# analyze peaks
# NOTE(review): frames for which ffmpeg returned no data leave their
# integer 0 placeholder in xCurves/yCurves, which would break/skew the
# mean below — confirm this cannot happen in practice.
xCurve = np.mean(xCurves, axis=0)
yCurve = np.mean(yCurves, axis=0)
rawCurves = [xCurve, yCurve]
spikyCurves = []
for axis in [0,1]:
    end = videoShape[axis]
    rawCurve = rawCurves[axis]
    start = round(end / 3) # only look below 3x upscaling
    # Subtract a sliding median to isolate narrow spikes of the spectrum.
    smoothCurve = scipy.ndimage.median_filter(rawCurve, size=9, mode='reflect')
    spikyCurve = rawCurve - smoothCurve
    spikyCurves.append(spikyCurve)
    #plt.plot(spikyCurve)
    #plt.xlim([start, end])
    #amplitude = min(spikyCurve[start:])*1.1
    #plt.ylim(amplitude, -amplitude)
    #plt.show()

# combine the two curves
peakCount = 5
squeezedCurve = skimage.transform.resize(spikyCurves[0], (height,), preserve_range=True, mode='constant')
combinedCurve = spikyCurves[1] + squeezedCurve
minimumIndices = np.argsort(combinedCurve)[:peakCount]
peakConfidences = np.zeros((peakCount))
# Score each dip: one-sided probability of a value this low under a
# normal fit of the combined curve.
for i, minimumIndex in enumerate(minimumIndices):
    minimumValue = combinedCurve[minimumIndex]
    confidence = 1-scipy.stats.norm(np.mean(combinedCurve), np.std(combinedCurve)).cdf(minimumValue)
    peakConfidences[i] = confidence

# extract the most likely original video resolutions
if any(peakConfidences > 0.8):
    print('Most likely original resolutions:')
    for i, confidence in enumerate(peakConfidences):
        if confidence > 0.8:
            yIndex = minimumIndices[i]
            xIndex = yIndex * videoRatio
            confidenceText = '{:.3f}%'.format(confidence*100)
            print('x: {:.0f}, y: {:.0f} ({})'.format(xIndex, yIndex, confidenceText))
else:
print('Video is most likely not upscaled.') | true |
3bda405733a86334fe0e8e647e5e60215c0e49e2 | Python | stefantkeller/errorvalues | /errvallist.py | UTF-8 | 4,697 | 2.984375 | 3 | [
"MIT"
] | permissive | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
'''
Work with whole lists of errval's:
[v0+-e0, v1+-e1, ...]
'''
import numpy as np
from errval import *
class errvallist(list):
    # List of errval objects (value +- error) with elementwise arithmetic.
    # NOTE: this module is Python 2 only — it uses ``xrange``, ``long`` and
    # the ``raise Exc, msg`` statement syntax throughout.
    def __init__(self,vals=[],errs=0,printout='latex'):
        # Accepted combinations: an existing errvallist (kept by reference),
        # two equal-length sequences of values/errors, a sequence of values
        # with one scalar error (also covers a list of errval entries, where
        # errs is ignored), or one scalar value with a sequence of errors.
        # NOTE(review): mutable default argument ``vals=[]``; also, when
        # vals and errs are sequences of *different* lengths no branch
        # assigns self.__errl, so later use fails with AttributeError —
        # confirm whether that case should raise explicitly instead.
        if isinstance(vals,errvallist):
            self.__errl = vals
        elif isinstance(vals,(list,np.ndarray)) or isinstance(errs,(list,np.ndarray)):
            if isinstance(vals,(list,np.ndarray)) and isinstance(errs,(list,np.ndarray)):
                if len(vals)==len(errs):
                    self.__errl = [errval(vals[j],errs[j],printout) for j in xrange(len(vals))]
            elif isinstance(vals,(list,np.ndarray)) and isinstance(errs,(int,float,long)):
                # note: this also covers the case vals is a list of errval entries,
                # in this case the errs are ignored and the result is a conversion of list to errvallist
                self.__errl = [errval(v,errs,printout) for v in vals]
            elif isinstance(vals,(int,float,long)) and isinstance(errs,(list,np.ndarray)):
                self.__errl = [errval(vals,e,printout) for e in errs]
        else:
            raise ValueError, 'Cannot assign input data: {0}'.format(type(vals))
    def __getitem__(self,key):
        return self.__errl[key]
    def __setitem__(self,key,value):
        self.__errl[key]=value
    def __getslice__(self,i,j):
        # https://docs.python.org/2/reference/datamodel.html#object.__getslice__
        # Deprecated since version 2.0
        # but since I derive from list I have to ignore this deprecation...
        return errvallist(self.__errl[i:j])
    def __str__(self):
        # Render as '[v0+-e0,v1+-e1,...]' using each errval's __str__.
        # NOTE(review): for an empty list this returns ']' (the opening
        # bracket is sliced away by outp[:-1]) — confirm intended.
        outp = '['
        for evl in self.__errl:
            outp += evl.__str__()+','
        outp = outp[:-1]+']'
        return outp
    def __iter__(self):
        # to make the errvallist iterable
        # i.e. to make the 'in' possible in 'for err in errvallist:'
        for err in self.__errl:
            yield err
    def __len__(self):
        return len(self.__errl)
    def __add__(self,other):
        # Elementwise with an equal-length sequence, or broadcast a scalar
        # (number or single errval) over every element.
        if isinstance(other,(errvallist,list)) and len(self)==len(other):
            errvall = [self[j]+other[j] for j in xrange(len(self))]
        elif isinstance(other,(int,float,long,errval)):
            errvall = [s+other for s in self]
        else:
            raise TypeError, 'unsupported operand type(s) for +: errval with {0}'.format(type(other))
        return errvallist(errvall)
    def __radd__(self,other):
        return self.__add__(other)
    def __sub__(self,other):
        if isinstance(other,(errvallist,list)) and len(self)==len(other):
            errvall = [self[j]-other[j] for j in xrange(len(self))]
        elif isinstance(other,(int,float,long,errval)):
            errvall = [s-other for s in self]
        else:
            raise TypeError, 'unsupported operand type(s) for -: errval with {0}'.format(type(other))
        return errvallist(errvall)
    def __rsub__(self,other):
        # other - self == -(self - other)
        return -1*self.__sub__(other)
    def __mul__(self,other):
        if isinstance(other,(errvallist,list)) and len(self)==len(other):
            errvall = [self[j]*other[j] for j in xrange(len(self))]
        elif isinstance(other,(int,float,long,errval)):
            errvall = [s*other for s in self]
        else:
            raise TypeError, 'unsupported operand type(s) for *: errval with {0}'.format(type(other))
        return errvallist(errvall)
    def __rmul__(self,other):
        return self.__mul__(other)
    def __div__(self,other):
        # Python 2 division protocol; elementwise or scalar as above.
        if isinstance(other,(errvallist,list)) and len(self)==len(other):
            errvall = [self[j]/other[j] for j in xrange(len(self))]
        elif isinstance(other,(int,float,long,errval)):
            errvall = [s/other for s in self]
        else:
            raise TypeError, 'unsupported operand type(s) for /: errval with {0}'.format(type(other))
        return errvallist(errvall)
    def __rdiv__(self,other):
        # other / self == 1 / (self / other)
        return 1.0/self.__div__(other)
    def append(self,value):
        self.__errl.append(value)
    def round(self,n=0):
        # returns new instance
        return errvallist([errv.round(n) for errv in self])
'''
Depending on the circumstances the code incorporating this class
may want to use different names for the following functions:
'''
def v(self): return np.array([ev.val() for ev in self])
def val(self): return self.v()
def vals(self): return self.v()
def values(self): return self.v()
def e(self): return np.array([ev.err() for ev in self])
def err(self): return self.e()
def errs(self): return self.e()
def errors(self): return self.e()
| true |
54cd3270c8aeade571c21eb1f232505e838dc940 | Python | jfmacedo91/curso-em-video | /python/ex039.py | UTF-8 | 673 | 4 | 4 | [] | no_license | from datetime import date
# Exercise 039 (Portuguese course script): given a birth year, report the
# user's age this year and whether military enlistment is upcoming, overdue,
# or due now. ANSI escape codes (\033[33m / \033[m) colour the output yellow.
print('\033[33m{:=^51}\033[m'.format(' Exercício 039 '))
nasc = int(input('Ano de nascimento: '))  # birth year
hoje = date.today().year  # current year
print('Quem nasceu em {} tem \033[33m{} anos\033[m em {}.'.format(nasc, hoje-nasc, hoje))
# Younger than 18: enlistment still ahead.
if (hoje-nasc) < 18:
    print('Ainda faltam \033[33m{} anos\033[m para o alistamento.\nSeu alistamento será em \033[33m{}\033[m.'.format(18-(hoje-nasc), nasc+18))
# Older than 18: enlistment is overdue.
elif (hoje-nasc) > 18:
    print('Você já deveria ter se alistado há \033[33m{} anos\033[m.\nSeu alistamento foi em \033[33m{}\033[m.'.format((hoje-nasc)-18, nasc+18))
# Exactly 18: enlist now (branch body follows below).
elif (hoje-nasc) == 18:
print('Você deve se alistar \033[33mIMEDIATAMENTE!\033[m') | true |
9b092a47f3b1b55f64e665f6e40bc76074200670 | Python | psy1088/Algorithm | /Baekjoon/Implementation/2753.py | UTF-8 | 151 | 3.34375 | 3 | [] | no_license | def leap_year(n):
if N % 4 == 0:
if N % 100 != 0 or N % 400 == 0:
return 1
return 0
# Read a year from stdin and print 1 if it is a leap year, else 0.
N = int(input())
print(leap_year(N))
| true |
d58459970c645bd588cea6493b12b1222ec0e4f3 | Python | justinmoon/raft | /raft/five_thread_demo.py | UTF-8 | 2,962 | 2.890625 | 3 | [] | no_license | import threading
import queue
import logging
import random
import time
logging.basicConfig(level="INFO", format='%(threadName)-6s | %(message)s')
def send_msg(thread_id, queues, msg):
    """
    Broadcast *msg* to every node's inbox except the sender's own.

    Generalized: iterates the supplied queue list instead of assuming a
    hard-coded cluster size of 5, so it works for any number of nodes
    (behaviour is unchanged for the 5-node demo).

    :param thread_id: index of the sending node (its own queue is skipped)
    :param queues: one queue.Queue inbox per node
    :param msg: the message dict to deliver
    """
    for i, inbox in enumerate(queues):
        if i != thread_id:
            inbox.put(msg)
            print("sent to ", i)
def heartbeat(thread_id, queues):
    """Leader heartbeat: broadcast an append_entries message to every other
    node, then re-arm a one-shot timer (random delay under 0.7s) that calls
    this function again, so the heartbeat repeats indefinitely."""
    print('inside heartbeat')
    beat = {'command': 'append_entries', 'node_id': thread_id}
    send_msg(thread_id, queues, beat)
    # Schedule the next beat with some jitter.
    delay = random.random() * .7
    threading.Timer(delay, heartbeat, (thread_id, queues)).start()
def target(thread_id, queues):
    """Per-node worker loop for the toy Raft election demo.

    Each node blocks on its own inbox queue and reacts to three message
    kinds: request_vote (always grants the vote), respond_vote (tallied by
    the candidate; on reaching the threshold it becomes leader and starts
    heartbeating), and append_entries (refreshes the liveness deadline).
    """
    thread_name = threading.currentThread().name
    # Deadline for hearing from a leader; the +100s grace period
    # effectively disables the timeout during startup.
    last_msg = time.time() + 100
    current_leader = None
    is_candidate = False
    voting_tally = [None] * 5
    # all threads need to spawn
    # one needs to request an election
    # others should accept their request, setting current_leader
    # when threshold crossed, start leading
    # node 0 calls election (hack)
    if thread_id == 0:
        msg = {'command': 'request_vote', 'candidate_id': 0}
        is_candidate = True
        send_msg(thread_id, queues, msg)
        print("SENT")
    while True:
        q = queues[thread_id]
        msg = q.get()  # blocks until a message arrives
        print(thread_id, 'got', msg)
        # Liveness check: only evaluated after a message arrives, so a
        # totally silent cluster never trips it.
        if time.time() > last_msg + 1:
            raise TimeoutError()
        if msg['command'] == 'request_vote':
            # Always grant the vote (no term/log checks in this demo).
            res = {'command': 'respond_vote', 'node_id': thread_id, 'decision': True}
            queues[msg['candidate_id']].put(res)
            print(thread_id, 'voted for', msg['candidate_id'])
        if msg['command'] == 'respond_vote':
            if is_candidate:
                voting_tally[msg['node_id']] = msg['decision']
                if voting_tally.count(True) >= 2: # FIXME: this should start from 3 w/ candidate voting for self
                    # set global variables
                    current_leader = thread_id
                    is_candidate = False
                    voting_tally = [None] * 5
                    print(thread_id, 'is now candidate')
                    msg = {'command': 'append_entries', 'node_id': thread_id}
                    send_msg(thread_id, queues, msg)
                    # Start the self-rescheduling heartbeat loop.
                    heartbeat(thread_id, queues)
                else:
                    print(msg['node_id'], 'voted for', thread_id)
            else:
                print('NOT A CANDIDATE')
        if msg['command'] == 'append_entries':
            # if msg['node_id'] == current_leader:
            print('received "append_entries" from', msg['node_id'])
            # Any heartbeat (from any node) refreshes the deadline.
            last_msg = time.time()
def run():
    """Spin up the 5-node demo cluster: one inbox queue per node and one
    worker thread per node, each running target(node_id, queues)."""
    node_count = 5
    queues = []
    for _ in range(node_count):
        queues.append(queue.Queue())
    workers = []
    for node_id in range(node_count):
        workers.append(threading.Thread(target=target, args=(node_id, queues)))
    for worker in workers:
        worker.start()
    # Workers are deliberately not joined; they loop forever.
# Start the demo cluster when executed as a script.
if __name__ == '__main__':
    run()
| true |
0d374c89d3df9950e0fb8a1074b6a5c9c9899ffc | Python | pieterbork/operationbgp | /server/manage_server.py | UTF-8 | 983 | 3.3125 | 3 | [] | no_license | import sys
def kill_server(color):
    """
    Remove *color* from the comma-separated server list stored in
    colors.txt (single line, e.g. "red,green,blue") and rewrite the file.

    :param color: server colour to remove
    :raises ValueError: if the colour is not currently listed
    :raises FileNotFoundError: if colors.txt does not exist
    """
    # Context managers guarantee the handles are closed even on error
    # (the old code closed them manually and leaked on exceptions).
    with open('colors.txt', 'r') as f:
        colors = f.readline().strip().split(',')
    colors.remove(color)
    with open('colors.txt', 'w') as f:
        f.write(",".join(colors).strip())
def add_server(color):
    """
    Append *color* to the comma-separated server list in colors.txt,
    unless it is already listed (in which case a notice is printed and the
    file is left untouched).

    :param color: server colour to add
    :raises FileNotFoundError: if colors.txt does not exist
    """
    # Context managers guarantee the handles are closed even on error.
    with open('colors.txt', 'r') as f:
        colors = f.readline().strip().split(',')
    if color in colors:
        print("server is already alive!")
    else:
        colors.append(color)
        with open('colors.txt', 'w') as f:
            f.write(",".join(colors).strip())
def main():
    """
    CLI entry point: ``<op> <server-index>`` where *op* is "kill" or "add"
    and the index selects from ["red", "green", "blue"].
    """
    args = sys.argv
    if len(args) < 3:
        print("not enough args")
        quit()
    op = args[1]
    num = int(args[2])
    colors = ["red", "green", "blue"]
    try:
        color = colors[num]
    except IndexError:
        # Bug fix: previously a bare `except` printed the message and then
        # fell through to use `color`, which was unbound -> NameError.
        # Narrow the exception and stop here instead.
        print("server not found!")
        return
    if op == "kill":
        kill_server(color)
    elif op == "add":
        add_server(color)
# Runs immediately on import: there is no `if __name__ == '__main__'` guard.
main()
| true |
a0516d8f331002f71c0b1d27f3be2f42f04de27e | Python | ons-eq-team/eq-questionnaire-runner | /tests/integration/questionnaire/test_questionnaire_is_skipping_question.py | UTF-8 | 1,976 | 2.53125 | 3 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | from tests.integration.integration_test_case import IntegrationTestCase
class TestQuestionnaireChangeAnswer(IntegrationTestCase):
    """Integration tests asserting that the questionnaire's final summary
    page redirects to the first incomplete question whenever any section is
    incomplete or has been invalidated by changing an earlier answer.

    Uses the `test_is_skipping_to_end` schema and the IntegrationTestCase
    helpers: launchSurvey, get, post, assertInBody (defined in the project
    base class, outside this file).
    """
    def test_final_summary_not_available_if_any_question_incomplete(self):
        # Given I launched a survey and have not answered any questions
        self.launchSurvey("test_is_skipping_to_end")
        # When I try access the final summary
        self.get("questionnaire/summary")
        # Then I should be redirected to the first incomplete question in the survey
        self.assertInBody("Were you forced to complete section 1?")
    def test_final_summary_not_available_after_invalidating_section(self):
        # Given I launched a survey and have answered all questions
        self.launchSurvey("test_is_skipping_to_end")
        # Complete section 1 and confirm its section summary.
        self.post({"test-skipping-answer": "No"})
        self.assertInBody("Check your answers before continuing")
        self.assertInBody("Were you forced to complete section 1?")
        self.post()
        # Complete section 2 and confirm its section summary.
        self.post({"test-skipping-answer-2": "No"})
        self.assertInBody("Check your answers before continuing")
        self.assertInBody("Were you forced to complete section 2?")
        self.post()
        # Final summary shows both sections and a submit button.
        self.assertInBody("Check your answers before submitting")
        self.assertInBody("Were you forced to complete section 1?")
        self.assertInBody("Were you forced to complete section 2?")
        self.assertInBody("Submit answers")
        # When I invalidate any block and try access the final summary
        self.get("questionnaire/test-skipping-forced-2#test-skipping-answer-2")
        self.post({"test-skipping-answer-2": "Yes"})
        self.get("questionnaire/test-skipping-forced#test-skipping-answer")
        self.post({"test-skipping-answer": "Yes"})
        self.get("questionnaire/summary")
        # Then I should be redirected to the first incomplete question in the survey
        self.assertInBody("What would incentivise you to complete this section?")
| true |
591e1997f1a25b437d3981f853b8082dfa6b880d | Python | krishcdbry/Python-Basics | /fib.py | UTF-8 | 179 | 3.90625 | 4 | [] | no_license | print "Fibonacci Series in Python \n \n"
# Python 2 script: print the first n Fibonacci numbers (starting "0 1").
# NOTE(review): `select`-style counter `co` starts at 2 because the first
# two terms are printed up front; `c` holds the next term.
n = int(raw_input("Enter the range of fib"))
a,b,co,c=0,1,2,0
print "%s %s" %(a,b)
while(co<n):
    c = a+b
    a,b = b,c
    print c
co += 1 | true |
4b68a52f2a8861f2d942f067a2eee824063f3544 | Python | ississ0/PythonWebProject | /OC_1206/function0.py | UTF-8 | 335 | 4.46875 | 4 | [] | no_license |
def 더하기(숫자1, 숫자2):
    """Return the sum of the two arguments ('add'); with strings, `+`
    concatenates them instead."""
    return 숫자1 + 숫자2
# With numbers the function adds: prints 5.
결과1 = 더하기(2,3)
print(결과1)
# With strings `+` concatenates: prints '아야'.
결과2 = 더하기('아','야')
print(결과2)
def sayHello():
    """Return the fixed greeting string "Hello"."""
    greeting = "Hello"
    return greeting
# A function that takes no ingredients (arguments)
result=sayHello()
print(result)
# A function with no return value
# Prints a greeting as a side effect; implicitly returns None.
def printHello() :
print("Hello!") | true |
f9f72d95660bc778ef69475c2d4d58f8aa9286d5 | Python | alina-timir/Playground-1 | /Pyton_stuff/HarveyMuddX/4is4.py | UTF-8 | 285 | 3.578125 | 4 | [] | no_license | __author__ = 'darakna'
print("Zero is", 4+4-4-4)
print("One is", 4/4)
print("Two is", (4+4) / 4)
print("Three is", int((4*4-4)/4))
print("Four is", 4)
print("Five is", 4+4/4)
print("Six is", 4 + (4+4) / 4)
print("Seven is", 4+4 - 4/4)
print("Eight is", 4+4)
print("Nine is", 4+4 + 4/4) | true |
63426ae1034dbf7d17f132bbbdf091c32b760a87 | Python | trentcraighart/undergradCoursework | /assignments160/calculator.py | UTF-8 | 1,083 | 3.9375 | 4 | [] | no_license | select = 1
# Interactive whole-number calculator. Outer loop repeats whole
# calculations while the user answers 'y'; the inner loop re-prompts until
# a recognised operator is entered (cookie == 0 means "operator accepted").
start = 'y'
cookie = 1
print("Hello and welcome to my calculator!");
print("Note, this calculator only works with whole numbers");
print("Please refrain from using any 'words'");
while (start == 'y'):
    while (cookie == 1):
        # Bug fix: prompt typo "Imput" -> "Input".
        opp = str(input("Input Operand: + - / * % **: "));
        if opp == '+':
            cookie = 0
        elif opp == '-':
            cookie = 0
        elif opp == '/':
            cookie = 0
        elif opp == '*':
            cookie = 0
        elif opp == '%':
            cookie = 0
        elif opp == '**':
            cookie = 0
        elif opp == 'Wake up Neo':
            # Easter egg; does not leave the loop.
            print("follow the white rabbit");
        elif opp == 'words':
            print("You know what you've done :p");
    num1 = int(input("What is your first number: "));
    num2 = int(input("What is your second number: "));
    # Dispatch on the accepted operator. Note `/` is true division, so it
    # can print a float despite the whole-number disclaimer above.
    if opp == '+':
        print(num1 + num2);
    elif opp == '-':
        print(num1 - num2);
    elif opp == '/':
        print(num1 / num2);
    elif opp == '*':
        print(num1 * num2);
    elif opp == '**':
        print(num1 ** num2);
    elif opp == '%':
        print(num1 % num2);
    # Re-arm the operator prompt for the next round.
    cookie = 1;
    # Bug fix: prompt typo "contineu" -> "continue".
    start = str(input("Press y to continue, any other key to quit "));
print("Thanks for using my calculator!");
| true |
8838f3e2fdc5d6ea5a9103a1a6366c9105ffcc5f | Python | AK-1121/code_extraction | /python/python_5786.py | UTF-8 | 133 | 2.609375 | 3 | [] | no_license | # numpy: Replacing values in a recarray
# Snippet: for every field of the numpy structured/record array `a`
# (defined elsewhere), replace empty-string entries with '54321' in place.
for fieldname in a.dtype.names:
    ind = a[fieldname] == ''      # boolean mask of empty entries
    a[fieldname][ind] = '54321'   # masked in-place assignment
| true |
b45bba71d49d685d6a915aba601cebee684d2614 | Python | DuckHunt-discord/DuckHunt-Community-Rewrite | /cogs/basics.py | UTF-8 | 3,125 | 2.71875 | 3 | [] | no_license | import datetime
import random
import discord
from discord.ext import commands
from cogs.helpers.checks import have_required_level
class Basics:
    """
    Really basic commands of the bot
    Normally one liners or misc stuff that can't go anywhere else.

    discord.py cog: each `@commands.command()` coroutine becomes a chat
    command; `@have_required_level(n)` is a project permission check.
    """
    def __init__(self, bot):
        # The bot instance; `bot.uptime` is expected to hold the start
        # time (set elsewhere in the project).
        self.bot = bot
    def get_bot_uptime(self):
        # Human-readable elapsed time since self.bot.uptime (UTC).
        now = datetime.datetime.utcnow()
        delta = now - self.bot.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)
    @commands.command()
    @have_required_level(1)
    async def uptime(self, ctx):
        """
        Tells you how long the bot has been up for.
        """
        await ctx.send_to('Uptime: **{}**'.format(self.get_bot_uptime()))
    @commands.command()
    @have_required_level(6)
    async def level(self):
        """
        Nobody can do this anyway, the required permission is too high.
        """
        # NOTE(review): the callback takes no `ctx` parameter; discord.py
        # command callbacks normally require one — confirm this is
        # intentional given the command is unreachable anyway.
        pass
    @commands.command()
    @have_required_level(1)
    async def random_staff(self, ctx, count=2):
        """
        Give you the names of `count` members of staff.
        """
        # Gather the staff role objects by name, then every member holding
        # at least one of them.
        roles = [discord.utils.get(ctx.guild.roles, name=role_name) for role_name in ['Owner', 'Moderator', 'Translator', 'Bug Hunters', 'Proficient', 'Partner', 'Donator', 'DuckEnigma Event Winner 2018']]
        possibles = [m for m in ctx.guild.members if any([r in m.roles for r in roles])]
        # random.choices samples WITH replacement, so the same member can
        # appear more than once when count > 1.
        selected = [m.name + "#" + m.discriminator for m in random.choices(possibles, k=count)]
        await ctx.send_to(', '.join(selected))
    @commands.command()
    @have_required_level(2)
    async def webinterface_roles(self, ctx):
        # Emit PHP array snippets (one message per role) mapping member IDs
        # to names, for pasting into the web interface configuration.
        roles = {
            '$admins' : 'Owner',
            '$moderators' : 'Moderator',
            '$translators' : 'Translator',
            '$bug_hunters' : 'Bug Hunters',
            '$proficients' : 'Proficient',
            '$partners' : 'Partner',
            '$donators' : 'Donator',
            '$enigma_event_winners_june_2018': 'DuckEnigma Event Winner 2018'
        }
        member_list = sorted(ctx.guild.members, key=lambda u: u.name)
        for role_var in sorted(roles.keys()):
            role_name = roles[role_var]
            role = discord.utils.get(ctx.guild.roles, name=role_name)
            php_code = '```'
            php_code += f"{role_var} = array(\n"
            for member in member_list:
                if role in member.roles:
                    php_code += f"{member.id}, // {member.name}#{member.discriminator}\n"
            php_code += ");\n\n"
            php_code += '```'
            await ctx.send(php_code, delete_after=120)
# discord.py extension entry point: registers the Basics cog on the bot.
def setup(bot):
    bot.add_cog(Basics(bot))
| true |
cb17233783759a1639e901182e3ee55693415dd6 | Python | ssd04/ml-project-template | /src/misc/aws.py | UTF-8 | 3,630 | 2.640625 | 3 | [
"MIT"
] | permissive | import os
import boto3
from botocore.exceptions import ClientError
from botocore.config import Config
from loguru import logger
aws_region = os.environ.get("AWS_REGION")
config = Config(retries={"max_attempts": 5, "mode": "adaptive"})
def get_ssm_parameter_value(parameter_name):
    """
    Fetch (and decrypt) a parameter from AWS SSM Parameter Store.

    :param parameter_name: name of the parameter to look up
    :return: the parameter's value, or None when the parameter is missing
    """
    # Create the client outside the try so the exception class below is
    # always resolvable.
    ssm = boto3.client("ssm", region_name=aws_region, config=config)
    try:
        parameter = ssm.get_parameter(Name=parameter_name, WithDecryption=True)
    # Bug fix: the exception class lives on the client we created (`ssm`);
    # the previous `client.exceptions.ParameterNotFound` referenced an
    # undefined name and raised NameError instead of handling the miss.
    except ssm.exceptions.ParameterNotFound:
        logger.exception(f"Parameter [{parameter_name}] not found")
    else:
        return parameter["Parameter"]["Value"]
def read_data_from_s3(bucket_name, object_key):
    """
    Get an object from an S3 bucket.

    :param bucket_name: name of the bucket that contains the object
    :param object_key: the key of the object to retrieve
    :return: the object data in bytes
    :raises ClientError: if the object could not be fetched
    """
    try:
        s3 = boto3.resource("s3")
        bucket = s3.Bucket(bucket_name)
        # The streaming body is read fully into memory here.
        post_data = bucket.Object(object_key).get()["Body"].read()
        # (Removed dead code: a copy-pasted isinstance/try block that
        # re-assigned the same value and could never raise IOError.)
        logger.info(f"Got object '{object_key}' from bucket '{bucket.name}'.")
    except ClientError:
        # Bug fix: the message and its arguments were previously wrapped in
        # a single tuple, so logging never performed the %s substitution;
        # pass them as separate arguments. Also use bucket_name rather than
        # bucket.name, since `bucket` may be unbound if boto3.resource()
        # itself failed.
        logger.exception(
            "Couldn't get object '%s' from bucket '%s'.", object_key, bucket_name
        )
        raise
    else:
        return post_data
def save_data_to_s3(bucket_name, object_key, data):
    """
    Upload *data* to an S3 bucket under *object_key*.

    :param bucket_name: name of the destination bucket
    :param object_key: key the object will be stored under
    :param data: bytes-like payload, or a string (ASCII-encoded into a
                 bytearray before upload)
    :raises ClientError: if the upload fails
    """
    if isinstance(data, str):
        try:
            payload = bytearray(data.encode("ascii"))
        except IOError:
            logger.exception("Expected file name or binary data, got '%s'.", data)
            raise
    else:
        payload = data
    try:
        s3_bucket = boto3.resource("s3").Bucket(bucket_name)
        target = s3_bucket.Object(object_key)
        target.put(Body=payload)
        # Block until S3 reports the object as existing.
        target.wait_until_exists()
        logger.info(f"Put object '{object_key}' to bucket '{s3_bucket.name}'.")
    except ClientError:
        logger.exception(
            "Couldn't put object '%s' to bucket '%s'.", object_key, s3_bucket.name
        )
        raise
    finally:
        # Close the payload if it is a closable file-like object.
        if getattr(payload, "close", None):
            payload.close()
def get_sns_topic_by_name(client, topic_name):
    """
    Return the ARN of the SNS topic whose name matches *topic_name*, or
    None when no topic matches. Only the first page returned by
    ``client.list_topics()`` is inspected.
    """
    try:
        listing = client.list_topics()
    except ClientError as e:
        logger.error(f"Failed with error: {e}")
        raise
    # An ARN looks like arn:aws:sns:<region>:<account>:<name>; the topic
    # name is the final colon-separated field.
    for entry in listing["Topics"]:
        arn = entry["TopicArn"]
        if arn.split(":")[-1] == topic_name:
            return arn
    return None
def send_notification(topic_name, message, subject):
    """
    Publish *message* with *subject* to the SNS topic named *topic_name*.

    Does nothing when the topic cannot be found; logs and re-raises any
    AWS client error.
    """
    try:
        sns = boto3.client("sns", region_name=aws_region, config=config)
        arn = get_sns_topic_by_name(sns, topic_name)
        if arn is not None:
            sns.publish(
                TopicArn=arn,
                Message=message,
                Subject=subject,
            )
    except ClientError as e:
        logger.error(f"Failed with error: {e}")
        raise
    return
| true |
d9703600a0dbe0b06b8be713077647d8e748a20b | Python | stevenwongso/Python_Fundamental_DataScience | /5 Pandas/20f_pd_missingDate.py | UTF-8 | 363 | 2.765625 | 3 | [] | no_license |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load daily data indexed and sorted by the 'Tanggal' (Date) column.
df = pd.read_csv(
    '20_dataTelkom.csv',
    index_col=False,
    parse_dates=['Tanggal']
)
df = df.set_index('Tanggal')
df = df.sort_index()
# print(df)
# Some dates are missing because of holidays/weekends
# ("ada tanggal yg missing krn holiday/weekend").
# Resampling to daily frequency inserts the missing dates as all-zero rows;
# turn those zeros into NaN, then fill NaNs with the placeholder 'Haha'.
df = df.resample('D').sum()
df = df.replace(0, np.NaN)
df = df.fillna('Haha')
print(df)
d1abc0b57b38527bebc566dc8d30f2fefd10859f | Python | davidjuliancaldwell/ScientificSupercomputing | /Astro598bayesian/davidcaldwell_hw4/bayesian_functions_hw4.py | UTF-8 | 829 | 3.03125 | 3 | [] | no_license | import numpy as np
import random as random
import math
import re
import sys
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import beta
def beta_func(theta, a, b):
    """
    Numerically normalized Beta(a, b) prior density evaluated at theta.

    The normalization constant is obtained by trapezoidal integration of
    theta**(a-1) * (1-theta)**(b-1) over [0, 1].

    :param theta: point (or numpy array of points) in [0, 1]
    :param a: Beta shape parameter alpha
    :param b: Beta shape parameter beta
    :return: the normalized density at theta
    """
    numSteps = 1000
    theta_vector = np.linspace(0, 1, numSteps)
    # Vectorized integrand (replaces the per-point Python list comprehension).
    integrand = theta_vector ** (a - 1) * (1 - theta_vector) ** (b - 1)
    # Bug fix: integrate against the actual grid. The spacing of
    # linspace(0, 1, numSteps) is 1/(numSteps-1), but the old code passed
    # dx = 1/numSteps, slightly mis-scaling the normalization constant.
    coeff = 1 / np.trapz(integrand, x=theta_vector)
    return coeff * (theta ** (a - 1) * (1 - theta) ** (b - 1))
def plot_beta_func(a, b):
    """Plot the Beta(a, b) prior density on [0, 1] and save the figure to
    beta_dist_a_<a>_b_<b>.png."""
    grid = np.linspace(0, 1, 1000)
    densities = [beta_func(point, a, b) for point in grid]
    plt.figure()
    plt.plot(grid, densities)
    plt.xlabel('Theta')
    plt.ylabel('Probability')
    plt.title('Probability for Beta Distribution for a = {}, b = {}'.format(a, b))
    plt.savefig('beta_dist_a_{}_b_{}.png'.format(a, b))
| true |
762efbb738bd05133acd8fc9018495d2f3e26e99 | Python | KB-perByte/CodePedia | /Gen2_0_PP/Assignment/leetcode_combinationSumIV.py | UTF-8 | 1,775 | 2.828125 | 3 | [] | no_license | class Solution(object):
    def combinationSum42(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int

        Count ordered combinations of nums summing to target via memoized
        DFS. NOTE(review): three methods in this class share the name
        combinationSum42; only the last definition survives on the class.
        The `idx` key here is redundant — every recursive call fans out
        over all of range(n), so the count depends only on `cur`.
        """
        def dfs(idx, cur, memo):
            # Exact hit: one valid combination.
            if cur == target:
                return 1
            if cur > target:
                return 0
            if (idx, cur) in memo:
                return memo[idx, cur]
            # Try appending each number and recurse.
            memo[idx, cur] = sum(dfs(i, cur + nums[i], memo) for i in range(n))
            return memo[idx, cur]
        n = len(nums)
        return dfs(0, 0, {})
    def combinationSum4(self, nums, target):
        """Count ordered combinations of nums summing to target.

        Same memoized DFS as above but keyed only on the running sum `cur`,
        which is sufficient because order matters and every position may
        use any element.
        """
        def dfs(cur, memo):
            if cur == target:
                return 1
            if cur > target:
                return 0
            if cur in memo:
                return memo[cur]
            memo[cur] = sum(dfs(cur + nums[i], memo) for i in range(n))
            return memo[cur]
        n = len(nums)
        return dfs(0, {})
    def combinationSum42(self, nums, target):
        """Take/skip variant: at index idx either move on (skip nums[idx])
        or stay and add nums[idx] again. This counts *unordered*
        combinations with repetition, unlike the permutation-counting
        variants above. NOTE(review): duplicate method name — shadowed by
        the later definition.
        """
        def dfs(idx, cur, memo):
            # Past the last element: success only on an exact sum.
            if idx == n:
                return 1 if cur == target else 0
            if cur > target:
                return 0
            if (idx, cur) in memo:
                return memo[idx, cur]
            # skip nums[idx] entirely  +  use nums[idx] one more time
            memo[idx, cur] = dfs(idx + 1, cur, memo) + dfs(idx, cur + nums[idx], memo)
            return memo[idx, cur]
        n = len(nums)
        return dfs(0, 0, {})
    def combinationSum42(self, nums, target):
        """Bottom-up DP: store[i] counts ordered combinations summing to i.

        For each total i, a combination either ends exactly with j (i-j == 0)
        or extends a smaller total i-j. This is the definition that survives
        on the class (last of the three same-named methods).
        """
        store = [0]*(target+1)
        for i in range(target+1):
            count = 0
            for j in nums:
                if i-j == 0:
                    count += 1      # j alone reaches i
                elif i-j > 0:
                    count += store[i-j]  # extend every combination for i-j
            store[i] = count
return store[target] | true |
07b51d4d5b29e115f6ccf90bf47e59610cd01744 | Python | frostyfeet/craigslist_crawler_lambda | /source/search_master.py | UTF-8 | 1,918 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import abc
import boto3
import pickle
from slackclient import SlackClient
class SearchMaster(object):
    """Base class for Craigslist-style searchers running in AWS Lambda.

    Persists previously seen results as a pickle in S3 and posts new
    results to Slack. Subclasses supply the URLs to crawl.
    """
    # Class-level configuration shared by all searchers.
    aws_region = "us-west-2"
    s3_bucket_name = "temp-lambda-files"
    temp_path = "/tmp/"  # the only writable path inside Lambda
    # One S3 client shared across instances (created at import time).
    client = boto3.client('s3', region_name=aws_region)
    def __init__(self, urls, s3_filename, slack_token):
        self.urls = urls
        self.s3_filename = s3_filename
        # Load previously seen results (or an empty structure) from S3.
        self.old_data = self.read_s3_file()
        self.slack_token = slack_token
    def read_s3_file(self):
        """Download and unpickle the state file; on ANY failure fall back
        to an empty result set (e.g. first run, when the file is absent)."""
        full_path = "{}{}".format(self.temp_path, self.s3_filename)
        try:
            self.client.download_file(self.s3_bucket_name, self.s3_filename, full_path)
            pickle_in = open(full_path, "rb")
            # SECURITY NOTE(review): pickle.load executes arbitrary code if
            # the S3 object is attacker-controlled; acceptable only because
            # this bucket is written solely by write_s3_file below.
            return pickle.load(pickle_in)
        except Exception as e:
            # Broad catch is deliberate best-effort: missing file, bad
            # pickle, or S3 errors all reset the state.
            print("Returning empty list: {}".format(e))
            return {
                'Results': []
            }
    def write_s3_file(self):
        """Pickle the current state and upload it; returns True on success,
        False on any failure (errors are printed, not raised)."""
        try:
            full_path = "{}{}".format(self.temp_path, self.s3_filename)
            pickle_out = open(full_path, "wb")
            pickle.dump(self.old_data, pickle_out)
            pickle_out.close()
            self.client.upload_file(full_path, self.s3_bucket_name, self.s3_filename)
            return True
        except Exception as e:
            print("Error uploading file: {}".format(e))
            return False
    def add_new_results(self, new_results):
        # Append new results to the persisted list (written back later by
        # write_s3_file).
        self.old_data['Results'] = self.old_data['Results'] + new_results
    def get_new_results(self, new_results):
        # Results not seen before. O(n*m) membership scan — fine for the
        # small result lists involved here.
        return [value for value in new_results if value not in self.old_data['Results']]
    def send_slack(self, content):
        """Post *content* to #general as the CraigslistBot user."""
        sc = SlackClient(self.slack_token)
        sc.api_call(
            "chat.postMessage",
            channel="#general",
            text=content,
            as_user=True,
            mrkdwn=True,
            username="CraigslistBot"
        )
| true |
2b315440c12984a539b8eaaa7cb6881c0a931500 | Python | oy-vey/AISOBOI | /Python/lab1.py | UTF-8 | 191 | 3.5625 | 4 | [] | no_license | def cumsum(x):
"""Returns cumulative sums of a list"""
s = [0]
for i, v in enumerate(x):
a = v + s[i]
s.append(a)
return s
# Demo: prints [0, 1, 3, 6] (note the leading 0 seed).
ml = [1, 2, 3]
print(cumsum(ml))
| true |
05a5320c6bb22301884fbf1621b1bba6630369f5 | Python | tianhm/poetry | /src/poetry/repositories/repository_pool.py | UTF-8 | 4,783 | 2.671875 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
import enum
from collections import OrderedDict
from dataclasses import dataclass
from enum import IntEnum
from typing import TYPE_CHECKING
from poetry.repositories.abstract_repository import AbstractRepository
from poetry.repositories.exceptions import PackageNotFound
if TYPE_CHECKING:
from poetry.core.constraints.version import Version
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.package import Package
from poetry.repositories.repository import Repository
class Priority(IntEnum):
# The order of the members below dictates the actual priority. The first member has
# top priority.
DEFAULT = enum.auto()
PRIMARY = enum.auto()
SECONDARY = enum.auto()
@dataclass(frozen=True)
class PrioritizedRepository:
repository: Repository
priority: Priority
class RepositoryPool(AbstractRepository):
def __init__(
self,
repositories: list[Repository] | None = None,
ignore_repository_names: bool = False,
) -> None:
super().__init__("poetry-repository-pool")
self._repositories: OrderedDict[str, PrioritizedRepository] = OrderedDict()
self._ignore_repository_names = ignore_repository_names
if repositories is None:
repositories = []
for repository in repositories:
self.add_repository(repository)
@property
def repositories(self) -> list[Repository]:
unsorted_repositories = self._repositories.values()
sorted_repositories = sorted(
unsorted_repositories, key=lambda prio_repo: prio_repo.priority
)
return [prio_repo.repository for prio_repo in sorted_repositories]
def has_default(self) -> bool:
return self._contains_priority(Priority.DEFAULT)
def has_primary_repositories(self) -> bool:
return self._contains_priority(Priority.PRIMARY)
def _contains_priority(self, priority: Priority) -> bool:
return any(
prio_repo.priority is priority for prio_repo in self._repositories.values()
)
def has_repository(self, name: str) -> bool:
return name.lower() in self._repositories
def repository(self, name: str) -> Repository:
name = name.lower()
if self.has_repository(name):
return self._repositories[name].repository
raise IndexError(f'Repository "{name}" does not exist.')
def add_repository(
self, repository: Repository, default: bool = False, secondary: bool = False
) -> RepositoryPool:
"""
Adds a repository to the pool.
"""
repository_name = repository.name.lower()
if self.has_repository(repository_name):
raise ValueError(
f"A repository with name {repository_name} was already added."
)
if default and self.has_default():
raise ValueError("Only one repository can be the default.")
priority = Priority.PRIMARY
if default:
priority = Priority.DEFAULT
elif secondary:
priority = Priority.SECONDARY
self._repositories[repository_name] = PrioritizedRepository(
repository, priority
)
return self
def remove_repository(self, name: str) -> RepositoryPool:
if not self.has_repository(name):
raise IndexError(f"Pool can not remove unknown repository '{name}'.")
del self._repositories[name.lower()]
return self
def package(
self,
name: str,
version: Version,
extras: list[str] | None = None,
repository_name: str | None = None,
) -> Package:
if repository_name and not self._ignore_repository_names:
return self.repository(repository_name).package(
name, version, extras=extras
)
for repo in self.repositories:
try:
return repo.package(name, version, extras=extras)
except PackageNotFound:
continue
raise PackageNotFound(f"Package {name} ({version}) not found.")
def find_packages(self, dependency: Dependency) -> list[Package]:
repository_name = dependency.source_name
if repository_name and not self._ignore_repository_names:
return self.repository(repository_name).find_packages(dependency)
packages: list[Package] = []
for repo in self.repositories:
packages += repo.find_packages(dependency)
return packages
def search(self, query: str) -> list[Package]:
results: list[Package] = []
for repository in self.repositories:
results += repository.search(query)
return results
| true |
f454c4064f7349048d69324b5362a991f7e58e17 | Python | Abis47/HH-PA2609-1 | /day2.py | UTF-8 | 5,139 | 3.53125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 19:01:34 2021
@author: vikas
"""
#Conditions
'''
if (conditional statement -> True)
{
Perform Line of Statements
Perform Line of Statements
Perform Line of Statements
}
'''
a = 10
b = 20
if (a > b):
print ("a is greater than b")
if (a>b):
print ("a is greater")
else:
print("b is greater")
marks = int(input("Enter Marks -> "))
if (marks > 90):
print("A")
elif (marks>80 and marks <=90):
print ("B")
elif (marks>70 and marks <=80):
print ("C")
elif (marks>60 and marks <=70):
print ("D")
else:
print ("F")
#Looping
for i in list(range(0,50,5)):
print(i)
name =[]
for i in list(range(0,5)):
n = input("Enter Name -> ")
name.append(n)
name =[]
n = input("Enter Name -> ")
name.append(n)
n = input("Enter Name -> ")
name.append(n)
n = input("Enter Name -> ")
name.append(n)
n = input("Enter Name -> ")
name.append(n)
n = input("Enter Name -> ")
name.append(n)
for i in range(1,11):
print("2 * {0} = {1}".format(i, 2*i))
print (j , " * ", i, " = ", j*i)
print (str(j) + " * " + str(i) + " = "+ str(j*i)
for j in range(1, 11):
for i in range(1,11):
print("{0} * {1} = {2}".format(j, i, j*i))
l2 = ['name', 'rno', 'branch']
data = []
for j in range(1, 6):
l3=[]
for i in range (0, 3):
l3.append(input("Enter detail {0}- >".format(l2[i])))
data.append(l3)
data
choice = 1
while (choice):
l3=[]
for i in range (0, 3):
l3.append(input("Enter detail {0}- >".format(l2[i])))
data.append(l3)
choice = int(input("Enter 1 for more details or enter 0 -> "))
data
while(True):
print("Aa")
cnt=0
while(cnt<10):
print("Aa")
cnt = cnt + 1
team = ['India', 'Australia','Nepal', 'England'] # 4elements list index 0-3
for i in team:
print(i)
#Break
for i in team:
if i == 'Nepal':
print (i, " In Condition ")
break
print (i, " Outer Condition")
#Continue
for i in team:
if i == 'Nepal':
print (i, " In Condition ")
continue
print (i, " Outer Condition")
l2 = ['name', 'rno', 'branch']
data=[]
while (True):
l3=[]
for i in range (0, 3):
l3.append(input("Enter detail {0}- >".format(l2[i])))
data.append(l3)
choice = int(input("Enter 1 for more details or enter 0 -> "))
if choice == 0:
break
data=[]
cnt=0
while (True):
l3=[]
while(cnt<3):
det= input("Enter detail {0}- >".format(l2[cnt]))
if (det==''):
continue
cnt = cnt +1
l3.append(det)
data.append(l3)
choice = int(input("Enter 1 for more details or enter 0 -> "))
if choice == 0:
break
#Functions
a = 10
b =20
def oper():
print( a+b)
print (a-b)
print (a*b)
oper()
oper()
oper()
def printHello():
print ("Hello")
printHello()
printHello()
printHello()
def printHello(name):
print ("Hello " + name)
printHello("Vikas")
printHello("AK")
printHello("VK")
def printHello(fname, lname):
print ("Hello " + fname +" "+ lname)
printHello("Vikas", "Khullar")
printHello("AK", "Kumar")
printHello("VK", "Gupta")
def dat(name, rno, branch= 'None'):
print (name, rno, branch)
a = dat("vk", 22, 'CSE')
a
dat("vk", 22)
def dat1(name, rno = 'None', branch= 'None'):
print (name, rno, branch)
return(name, rno, branch)
a = dat1("VK", 111, 'CSE')
a
def maximum(l1):
m=0
for i in l1:
if (m<i):
m=i
return (m)
lst = [6,5,3,8,1]
maximum(lst)
lst.sort()
lst
data=[]
def dat1():
l1=[]
l1.append(input('Enter Name -> '))
l1.append(input('Enter Rno -> '))
l1.append(input('Enter Branch -> '))
return(l1)
for i in range(0,5):
l1 = dat1()
data.append(l1)
l1 = dat1()
data.append(l1)
data
l1 = dat1()
data.append(l1)
l1 = dat1()
data.append(l1)
# Lambda
def f(x):
return(x**2)
f(2)
f(9)
fl = lambda x : x**2
fl(10)
fl1 = lambda x,y: (x+y, x-y)
fl1(10,20)
l1 = [5, 7, 22, 97, 54, 62, 77, 23, 73, 61]
lm1 = lambda x: x*2
l2=[]
for i in l1:
l2.append(lm1(i))
l2
#map, filter, reduce
l1 = [5, 7, 22, 97, 54, 62, 77, 23, 73, 61]
final_list = list(map(lambda x: x*2 , l1))
print(final_list)
l1 = list(range(1, 10))
l1
fl = list( map(lambda x: x+2, l1))
fl
l1 = [5, '', 22, 97, '' , 62, 77, '', 73, 61]
final_list = list(filter(lambda x: (x != '') , l1))
print(final_list)
l1 = [5, 7, 22, 97, 54, 62, 77, 23, 73, 61]
final_list = list(filter(lambda x: (x > 50) , l1))
print(final_list)
x =9
x/2
x % 2
l1 = [5, 7, 22, 97, 54, 62, 77, 23, 73, 61]
final_list = list(filter(lambda x: (x%2 == 0) , l1))
print(final_list)
from functools import reduce
li = [5, 8, 10, 20, 50, 100]
s1 = reduce(lambda x, y: x * y, li)
print (s1)
import random as rd
x = rd.randint(1, 100)
x
l1 = [111,222,333,444,555,666]
rd.choice(l1)
rd.choices(l1, k=3)
gender = ['M', 'F']
lst = rd.choices(gender, k=100)
len(lst)
| true |
5ad2d8ca1baaa8412168fa84c330d6f6a91786d8 | Python | architsharma97/InstanceRetrieval | /Scripts/Test/vocab_tree.py | UTF-8 | 521 | 2.671875 | 3 | [] | no_license | import numpy as np
from sklearn.decomposition import PCA
# loading visual words_reduced
visual_words = np.load('../visual_words.npy')
# PCA
pca=PCA(n_components=500)
visual_words_reduced=pca.fit_transform(visual_words)
print "Compute Unstructured Hierarchical Clustering..."
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(visual_words_reduced)
elapsed_time = time.time() - st
label = ward.labels_
print "Elapsed time: %.2fs" %(elapsed_time)
print "Number of points: %i" %(label.size) | true |
4382f7b4eb24da5ce3c9a94d4fad2cb90846b7f4 | Python | Liquius/UraniumPower | /Script stuff/controlLuaStuff/controlLuaOnFissionReactorPlace.py | UTF-8 | 1,351 | 2.515625 | 3 | [] | no_license |
def makePrototype( xSize, ySize ):
    """Build one Lua `elseif` branch for control.lua.

    The branch links a reactor access chest of capacity xSize*ySize to a
    matching `nuclear-fission-reactor-<xSize>-by-<ySize>` entity found in
    the search area; if no (single) reactor is found, the chest entity is
    destroyed and returned to the player's inventory with a message.
    Returns the snippet as a single string ending in a newline.
    """
    # First line selects on the chest's capacity (xSize*ySize slots).
    p = ' elseif event.createdentity.name == "nuclear-fission-reactor-chest-%i" then\n' %(xSize*ySize)
    p += ' results = game.findentitiesfiltered{area = {{x1, y1}, {x2, y2}}, name = "nuclear-fission-reactor-%i-by-%i"}\n' %(xSize, ySize)
    p += ' if #results == 1 then\n'
    # Lazily create the global reactor/chest registry on first use.
    p += ' if glob.LReactorAndChest == nil then\n'
    p += ' glob.LReactorAndChest = {}\n'
    p += ' end\n'
    # Triple = {reactor entity, chest entity, counter initialised to 0}.
    p += ' reactorAndChest = {true, true, true}\n'
    p += ' reactorAndChest[1] = results[1]\n'
    p += ' reactorAndChest[2] = event.createdentity\n'
    p += ' reactorAndChest[3] = 0\n'
    p += ' table.insert(glob.LReactorAndChest, reactorAndChest)\n'
    p += ' game.player.print("Reactor access port successfully linked! Ready to accept fuel assemblies!")\n'
    p += ' else\n'
    # No unique reactor found: undo the placement and refund the item.
    p += ' event.createdentity.destroy()\n'
    p += ' game.player.insert({name = "nuclear-fission-reactor-chest-%i", count = 1})\n' %(xSize*ySize)
    p += ' game.player.print("Reactor access port cannot find a fission reactor! Returning to your inventory. Make sure you place your reactor first, and that you place your reactor access port next to it.")\n'
    p += ' end\n'
    return p
# Emit one elseif-branch for every reactor footprint from 3x3 up to 6x6
# and write the combined Lua snippet to OnFissionReactorPlace.txt.
chunks = []
for width in range(3, 7):
    for height in range(3, 7):
        chunks.append(makePrototype(width, height))
with open('OnFissionReactorPlace.txt', 'w') as outfile:
    outfile.write(''.join(chunks))
| true |
dbf93b804bc8c70d4c6e0a08fba02b7ea6763d68 | Python | hsqf/Course_Python | /chapter_09/iterable_iterator.py | UTF-8 | 1,127 | 4.09375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
from collections.abc import Iterator
"""
迭代器和可迭代对象
"""
class Company(object):
    """Iterable collection of employee names.

    Each iter() call returns a fresh MyIterator, so several independent
    iterations over the same Company can be in flight at once.
    """
    def __init__(self, employee_list):
        # The list of employee-name strings to expose for iteration.
        self.employee = employee_list
    def __iter__(self):
        """Return a new iterator over the employee list."""
        return MyIterator(self.employee)
    # def __getitem__(self, item):
    #     return self.employee[item]
# class MyIterator(object):
# def __iter__(self):
# return self
class MyIterator(Iterator):
    """Hand-rolled iterator over a list of employee names.

    Inheriting collections.abc.Iterator supplies __iter__, so only
    __next__ has to be implemented here.
    """
    def __init__(self, employee_list):
        self.iter_list = employee_list    # the sequence being walked
        self.index = 0                    # position of the next element
    def __next__(self):
        # Explicit bounds check replaces the original try/except IndexError.
        if self.index >= len(self.iter_list):
            raise StopIteration
        value = self.iter_list[self.index]
        self.index += 1
        return value
if __name__ == '__main__':
    # Demo: iterate a Company through its custom iterator protocol.
    employees = ["tom", "bob", "jane", "321"]
    firm = Company(employees)
    for member in firm:
        print(member)
f81e6dad032ba794bccb956ebeb298c6c5b54687 | Python | hanlinc27/Allertgen | /testing_api_python.py | UTF-8 | 1,727 | 2.90625 | 3 | [] | no_license | #https://devhints.io/xpath
from lxml import html
import requests

# Measure words / preparation fillers stripped from scraped ingredient text.
# fix: the original list was missing a comma between ' minced' and 'minced ',
# which silently concatenated them into the single token ' mincedminced '.
measures = [' cups ', 'cup ', ' teaspoons ', ' teaspoon ', ' tablespoons ', 'tablespoon ', ' plus ',
            ' pinch ', ' stick ', ' for serving', ' ounces ', ' ounce ', ' stalk', ' for topping', 'Finely grated zest ', ' of ',
            'can ', ' pounds ', ' pound ', 'chopped', 'chopped ', 'halved', 'cooked ', ' to ', ' for frying', ' minced',
            'minced ', 'diced ', 'diced', ' smashed', ' for oiling the grill grates', 'small', 'lower', 'juiced',
            ' zested ', 'shredded ', ' shredded', 'handful']

img_predict = "food"    ###TODO: this needs to set as the response from Google Cloud
# Map the image label to the Food Network search slug.
if (img_predict == "pad_thai"):
    food_name = "pad-thai"
else:
    food_name = img_predict

BASE_DIR = "https://www.foodnetwork.com/search/"
search_query = BASE_DIR + food_name + "-"
search_recipes = requests.get(search_query)
# if (search_recipes.status_code == 200):
tree1 = html.fromstring(search_recipes.content)
# First search hit -> recipe page URL (protocol-relative in the markup).
first_recipe = tree1.xpath('//h3[@class="m-MediaBlock__a-Headline"]/a/@href')[0]
print(first_recipe)
recipe_response = requests.get("https:" + first_recipe)
tree2 = html.fromstring(recipe_response.content)
ingredients = tree2.xpath('//p[@class="o-Ingredients__a-Ingredient"]/text()')
print(ingredients)

# Keep letters/spaces only, lower-case, then strip the measure words.
parsed_ingredients = []
for item in ingredients:
    tmp = ''.join([i for i in item if (i.isalpha() or i == ' ')]).lower()
    for m in measures:
        tmp = tmp.replace(m, '')
    parsed_ingredients.append(tmp)
print(parsed_ingredients)

highlights = []
allergen = []
for item in parsed_ingredients:
    for alle in allergen:
        item = item.strip()
        alle = alle.strip()
        if alle in item:    # fix: str has no .contains() method
            # fix: the original appended to `allergen` while iterating it
            # (a potential infinite loop); matched ingredients belong in
            # `highlights` -- TODO confirm intended destination.
            highlights.append(item)
47fb09bb3b121070303d3cd7777671a938cad72f | Python | HarryPeach/bAmbi | /bambi/transformers/average_colour_transformer.py | UTF-8 | 2,240 | 2.984375 | 3 | [] | no_license | from bambi.layout import Layout
from bambi.transformers.base_transformer import BaseTransformer
from PIL import Image, ImageGrab
class AverageColourTransformer(BaseTransformer):
def _draw_box(self, img, bb_width, bb_height, start_x, start_y) -> str:
cropped_image = img.crop((start_x, start_y, start_x + bb_width,
start_y + bb_height))
cropped_image.thumbnail((1, 1), Image.NEAREST)
color = cropped_image.getpixel((0, 0))
return color
def transform(self, layout: Layout) -> None:
im = ImageGrab.grab()
bounding_box_width = im.width / (layout.dimensions[0] + 2)
bounding_box_height = im.height / (layout.dimensions[1] + 2)
for i in range(len(layout.top_state)):
# ir reverses the order of the colours
ir = len(layout.top_state) - 1 - i
start_x = (ir + 1) * bounding_box_width
start_y = 0
layout.top_state[i] = self._draw_box(im, bounding_box_width,
bounding_box_height, start_x, start_y)
for i in range(len(layout.right_state)):
# ir reverses the order of the colours
ir = len(layout.right_state) - 1 - i
start_x = im.width - bounding_box_width
start_y = (ir + 1) * bounding_box_height
layout.right_state[i] = self._draw_box(im, bounding_box_width,
bounding_box_height, start_x, start_y)
for i in range(len(layout.bottom_state)):
start_x = (i + 1) * bounding_box_width
start_y = im.height - bounding_box_height
layout.bottom_state[i] = self._draw_box(im, bounding_box_width,
bounding_box_height, start_x, start_y)
for i in range(len(layout.left_state)):
# ir reverses the order of the colours
# ir = len(layout.left_state) - 1 - i
start_x = 0
start_y = (i + 1) * bounding_box_height
layout.left_state[i] = self._draw_box(im, bounding_box_width,
bounding_box_height, start_x, start_y) | true |
60cf0dbd014fa9946ce3a137daecf8ccbb0dab8e | Python | EdwinKato/bucket-list | /backend/api/tests/test_create_item.py | UTF-8 | 1,192 | 2.546875 | 3 | [
"MIT"
] | permissive | import json
from api.test import BaseTestCase
class TestCreateItem(BaseTestCase):
    """Integration test for creating an item inside a bucket list."""

    def test_create_item_in_bucket_list(self):
        """POSTing an item into bucket list 1 echoes the item's title."""
        auth_header = {'Authorization': 'JWT ' + self.token}

        # First create the bucket list the item will live in.
        bucket_payload = {
            "description": "Movies i have to watch by the end of the week",
            "status": "Pending",
            "title": "Entertainment",
            "user_id": 1
        }
        self.client.post('/api/v1/bucketlists',
                         headers=auth_header,
                         data=json.dumps(bucket_payload),
                         content_type='application/json')

        # Then add an item to it and check the response body.
        item_payload = {
            "description": "Horror movies",
            "status": "Pending",
            "title": "Wrong turn 6"
        }
        item_response = self.client.post('/api/v1/bucketlists/1/items',
                                         headers=auth_header,
                                         data=json.dumps(item_payload),
                                         content_type='application/json')
        self.assertIn("Wrong turn 6", str(item_response.data))
| true |
e9b5f528933c97bb17bbb44586fc184cb4e1198d | Python | DocenkoG/price_eyevis | /eyevis.py | UTF-8 | 11,885 | 2.65625 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import os
import os.path
import logging
import logging.config
import sys
import configparser
import time
import shutil
import openpyxl # Для .xlsx
#import xlrd # для .xls
from price_tools import getCellXlsx, getCell, quoted, dump_cell, currencyType, openX, sheetByName
import csv
import requests, lxml.html
def getCellXlsx( row             # row number
               , col             # column number
               , isDigit         # 'Y' when a numeric string is wanted
               , sheet           # openpyxl worksheet
               ):
    """Return the value of an .xlsx cell as a string.

    With isDigit == 'Y' the value is coerced to a number first: empty
    cells become '0'; text is cleaned of thousands separators, the
    decimal comma and '$', then parsed as float; unparseable text
    becomes '0.01'.  Integral floats lose the trailing '.0'.

    Note: this deliberately overrides the getCellXlsx imported from
    price_tools at the top of the file.
    """
    cell = sheet.cell(row=row, column=col)
    value = cell.value
    is_numeric = cell.data_type in ('n')    # openpyxl marks numeric cells 'n'
    if isDigit == 'Y':
        if value is None:
            return '0'
        if is_numeric:
            return str(int(value)) if int(value) == value else str(value)
        # Text cell: normalise '1.234,56$'-style numbers, else '0.01'.
        try:
            cleaned = value.replace('.', '').replace(',', '.').replace('$', '')
            return str(float(cleaned))
        except ValueError:
            return '0.01'
    if value is None:
        return ''
    if is_numeric:
        return str(int(value)) if int(value) == value else str(value)
    return str(value)
def nameToId(value) :
result = ''
for ch in value:
if (ch != " " and ch != "/" and ch != "\\" and ch != '_' and ch != "," and
ch != "'" and ch != "." and ch != "-" and ch != "!" and ch != "@" and
ch != "#" and ch != "$" and ch != "%" and ch != "^" and ch != "&" and
ch != "*" and ch != "(" and ch != ")" and ch != "[" and ch != "]" and
ch != "{" and ch != ":" and ch != '"' and ch != ";" ) :
result = result + ch
length = len(result)
if length > 50 :
point = int(length/2)
result = result[:13] + result[point-12:point+13] + result[-12: ]
return result
def getXlsString(sh, i, in_columns_j):
    """Read one row of an .xls sheet into a dict keyed by logical column name.

    in_columns_j maps the logical name to a 1-based column number.
    Price columns are parsed numerically; 'call us' ('Звоните') cells
    become the placeholder price '0.1'; the currency column is derived
    from the cell's number format via currencyType().
    """
    row_values = {}
    for name, col in in_columns_j.items():
        j = col - 1    # getCell() expects 0-based columns for .xls sheets
        if name in ('закупка', 'продажа', 'цена1'):
            if 'Звоните' in getCell(row=i, col=j, isDigit='N', sheet=sh):
                row_values[name] = '0.1'
            else:
                row_values[name] = getCell(row=i, col=j, isDigit='Y', sheet=sh)
        elif name == 'валюта_по_формату':
            row_values[name] = currencyType(row=i, col=j, sheet=sh)
        else:
            row_values[name] = getCell(row=i, col=j, isDigit='N', sheet=sh)
    return row_values
def getXlsxString(sh, i, in_columns_j):
    """Read one row of an .xlsx sheet into a dict keyed by logical column name.

    in_columns_j maps the logical name to a 1-based column number.
    Price columns fall back to the next column when empty and become the
    placeholder '0.1' for 'on request' cells; 'long_descr' concatenates
    the non-empty cells of columns 7..44.
    """
    row_values = {}
    for name, col in in_columns_j.items():
        j = col
        if name in ('закупка', 'продажа', 'цена', 'цена1'):
            if getCellXlsx(row=i, col=j, isDigit='N', sheet=sh) == '':
                j += 1    # the price is sometimes found in the next column
            text = getCellXlsx(row=i, col=j, isDigit='N', sheet=sh)
            if 'request' in text:
                row_values[name] = '0.1'
            else:
                row_values[name] = getCellXlsx(row=i, col=j, isDigit='Y', sheet=sh)
        elif name == 'валюта_по_формату':
            row_values[name] = currencyType(row=i, col=j, sheet=sh)
        elif name == 'short_descr':
            if getCellXlsx(row=i, col=j, isDigit='N', sheet=sh) == '':
                j = 3    # the short description is sometimes in column "C"
            row_values['short_descr'] = getCellXlsx(row=i, col=j, isDigit='N', sheet=sh)
        elif name == 'long_descr':
            parts = [getCellXlsx(row=i, col=k, isDigit='N', sheet=sh)
                     for k in range(7, 45)]
            row_values['long_descr'] = '; '.join(p for p in parts if p != '')
        elif name == 'код_':
            # Strip leading zeroes from the article code.
            row_values['код_'] = getCellXlsx(row=i, col=j, isDigit='N', sheet=sh).lstrip('0')
        else:
            row_values[name] = getCellXlsx(row=i, col=j, isDigit='N', sheet=sh)
    return row_values
def convert_excel2csv(cfg):
    """Convert the configured .xlsx price list into per-currency CSV files.

    Reads [basic] filename_in / filename_out / sheetname from cfg, walks
    the sheet rows keeping track of the current group/subgroup headers
    (recognised by font size), fills the [cols_out] templates with the
    row's [cols_in] values and writes each record.  Returns False when
    the sheet is missing, otherwise None.

    Fixes vs. the original: `i_last` is initialised (an empty sheet no
    longer raises NameError) and the EUR output file is closed.
    """
    csvFName = cfg.get('basic','filename_out')
    priceFName= cfg.get('basic','filename_in')
    sheetName = cfg.get('basic','sheetname')
    log.debug('Reading file ' + priceFName )
    sheet = sheetByName(fileName = priceFName, sheetName = sheetName)
    if not sheet :
        log.error("Нет листа "+sheetName+" в файле "+ priceFName)
        return False
    log.debug("Sheet "+sheetName)
    out_cols = cfg.options("cols_out")
    in_cols = cfg.options("cols_in")
    # Output templates: template text contains input column names that
    # are substituted with the row's values below.
    out_template = {}
    for vName in out_cols :
        out_template[vName] = cfg.get("cols_out", vName)
    in_cols_j = {}
    for vName in in_cols :
        in_cols_j[vName] = cfg.getint("cols_in", vName)
    # One CSV per currency: <name>_RUR/_EUR/_USD before the extension.
    csvFNameRUR =csvFName[:-4]+'_RUR'+csvFName[-4:]
    csvFNameEUR =csvFName[:-4]+'_EUR'+csvFName[-4:]
    csvFNameUSD =csvFName[:-4]+'_USD'+csvFName[-4:]
    outFileRUR = open( csvFNameRUR, 'w', newline='', encoding='CP1251', errors='replace')
    outFileEUR = open( csvFNameEUR, 'w', newline='', encoding='CP1251', errors='replace')
    outFileUSD = open( csvFNameUSD, 'w', newline='', encoding='CP1251', errors='replace')
    csvWriterRUR = csv.DictWriter(outFileRUR, fieldnames=out_cols )
    csvWriterEUR = csv.DictWriter(outFileEUR, fieldnames=out_cols )
    csvWriterUSD = csv.DictWriter(outFileUSD, fieldnames=out_cols )
    csvWriterRUR.writeheader()
    csvWriterEUR.writeheader()
    csvWriterUSD.writeheader()
    recOut ={}
    grpName = ''
    subGrpName = ''
    i_last = 1    # fix: defined even when the sheet has no data rows
    for i in range(2, sheet.max_row +1) : # xlsx
        i_last = i
        try:
            impValues = getXlsxString(sheet, i, in_cols_j) # xlsx
            if sheet.cell( row=i, column=in_cols_j['группа_']).font.sz==16: # group header (font 16)
                grpName = impValues['группа_']
                subGrpName = ''
                print(grpName)
                continue
            if sheet.cell( row=i, column=in_cols_j['подгруппа']).font.sz==12: # subgroup header (font 12)
                subGrpName = impValues['подгруппа']
                continue
            if impValues['код_']=='' or sheet.cell( row=i, column=in_cols_j['код_']).font.bold==True : # empty / bold row -> skip
                #print( 'Пустая строка. i=',i, impValues )
                continue
            else : # regular data row
                impValues['группа_'] = grpName
                impValues['подгруппа'] = subGrpName
                for outColName in out_template.keys() :
                    shablon = out_template[outColName]
                    # Substitute every input column name found in the template.
                    for key in impValues.keys():
                        if shablon.find(key) >= 0 :
                            shablon = shablon.replace(key, impValues[key])
                    # Purchase-price templates may be '<price>*<rate>'.
                    if (outColName == 'закупка') and ('*' in shablon) :
                        p = shablon.find("*")
                        vvv1 = float(shablon[:p])
                        vvv2 = float(shablon[p+1:])
                        shablon = str(round(vvv1 * vvv2, 2))
                    recOut[outColName] = shablon.strip()
                # NOTE(review): the per-currency switch below is commented
                # out -- every record currently goes to the EUR file, the
                # RUR/USD files receive only a header row.
                #if 'RUR'==recOut['валюта'] :
                # csvWriterRUR.writerow(recOut)
                #elif 'USD'==recOut['валюта'] :
                # csvWriterUSD.writerow(recOut)
                #elif 'EUR'==recOut['валюта'] :
                csvWriterEUR.writerow(recOut)
        except Exception as e:
            print(e)
            # openpyxl raises this for cells without colour info; it is
            # harmless, so only other exceptions are logged.
            if str(e) == "'NoneType' object has no attribute 'rgb'":
                pass
            else:
                log.debug('Exception: <' + str(e) + '> при обработке строки ' + str(i) +'.' )
    log.info('Обработано ' +str(i_last)+ ' строк.')
    outFileRUR.close()
    outFileEUR.close()    # fix: was never closed, leaking the file handle
    outFileUSD.close()
def config_read( cfgFName ):
    """Build a ConfigParser from private.cfg (if present) overlaid by cfgFName.

    Inline '#' comments are allowed.  A missing cfgFName is only logged;
    the parser built so far is returned either way.
    """
    parser = configparser.ConfigParser(inline_comment_prefixes=('#'))
    if os.path.exists('private.cfg'):
        parser.read('private.cfg', encoding='utf-8')
    if not os.path.exists(cfgFName):
        log.debug('Нет файла конфигурации '+cfgFName)
        return parser
    parser.read( cfgFName, encoding='utf-8')
    return parser
def is_file_fresh(fileName, qty_days):
    """Return True when fileName exists and is younger than qty_days days.

    Both failure cases (missing file, stale file) are reported through
    the module-level `log` and yield False.
    """
    allowed_age = qty_days * 24 * 60 * 60    # days -> seconds
    if not os.path.exists(fileName):
        log.error('Не найден файл '+ fileName)
        return False
    modified_at = os.path.getmtime(fileName)
    if modified_at + allowed_age < time.time():
        file_age = round((time.time() - modified_at) / 24 / 60 / 60)
        log.error('Файл "'+fileName+'" устарел! Допустимый период '+ str(qty_days)+' дней, а ему ' + str(file_age) )
        return False
    return True
def make_loger():
    """Configure logging from logging.cfg and bind the module-level `log`.

    Must run before the other functions in this module, most of which
    write through `log`.
    """
    global log
    logging.config.fileConfig('logging.cfg')
    log = logging.getLogger('logFile')
def processing(cfgFName):
    """Run one price-list conversion described by the given config file."""
    log.info('----------------------- Processing '+cfgFName )
    cfg = config_read(cfgFName)
    # NOTE(review): filename_out/filename_in are never used below; the
    # cfg.get calls still act as validation ([basic] must declare both
    # options or they raise) -- confirm whether that is intentional.
    filename_out = cfg.get('basic','filename_out')
    filename_in = cfg.get('basic','filename_in')
    convert_excel2csv(cfg)
def main( dealerName):
    """Process price lists according to the configuration files.

    The current directory must contain config files (cfg*.cfg) describing
    each file's properties and processing rules -- one config per price
    list (or per price-list section) with its own rules.
    """
    make_loger()
    log.info(' '+dealerName )
    # getting.cfg (optional) controls downloading / freshness checking of
    # the source price file before any cfg*.cfg is processed.
    if os.path.exists('getting.cfg'):
        cfg = config_read('getting.cfg')
        filename_new = cfg.get('basic','filename_new')
        rc_download = False
        # NOTE(review): download() is not defined in this module --
        # presumably provided elsewhere; verify before enabling a
        # [download] section, otherwise this raises NameError.
        if cfg.has_section('download'):
            rc_download = download(cfg)
        # 'срок годности' = allowed age of the price file, in days.
        if rc_download==True or is_file_fresh( filename_new, int(cfg.get('basic','срок годности'))):
            pass
        else:
            return
    for cfgFName in os.listdir("."):
        if cfgFName.startswith("cfg") and cfgFName.endswith(".cfg"):
            processing(cfgFName)
# Script entry point: the dealer name is the script's own base name.
if __name__ == '__main__':
    myName = os.path.basename(os.path.splitext(sys.argv[0])[0])
    mydir = os.path.dirname (sys.argv[0])
    print(mydir, myName)
    main( myName)
| true |
c86d1ff8086248e3d48ccda22ff8c21805f6bbf7 | Python | wangweihao/Python | /5/5-5.py | UTF-8 | 293 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env python
#coding:UTF-8
while 1:
str = raw_input('输入一个小于100美分的数字:')
num = int(str)
i = num / 25
print '25美分 is %d' % i
j = (num - i*25) / 10
print '10美分 is %d' % j
k = (num - i*25 - j*10) / 1
print '1美分 is %d' % k
| true |
4be388f395ea41bbf16ebd5c114972e993524e1b | Python | Shu-HowTing/Code-exercises | /E29.py | UTF-8 | 745 | 3.5625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Author: 小狼狗
'''
网易笔试:
x,y都是正整数,x、y<=n,且x%y>=k,求(x,y)一共有多少可能
'''
# def number(n, k):
# count = 0
# for x in range(1,n+1):
# for y in range(1,n+1):
# if x % y >= k:
# count += 1
# return count
# if __name__ == '__main__':
# n,k = [int(x) for x in input().split()]
# print(number(n,k))
#O(n)
def numbers(n,k):
count = 0
if k==0:
return n**2
for y in range(k+1, n+1):
a,b = divmod(n,y)
count = count + a*(y-k)
if b>=k:
count = count + (b-k+1)
return count
if __name__ == '__main__':
n,k = [int(x)for x in input().split()]
print(numbers(n,k))
| true |
2a8a4aa258fcd6947ffb99b5fd621806e7f45740 | Python | tangarmukesh/mukeshpython | /test_module02.py | UTF-8 | 213 | 3 | 3 | [] | no_license | """Sample doctest test module..
test_module02"""
def mul(a,b):
"""
>>> mul(2,4)
8
>>> mul('a',2)
'aa'
"""
return a*b
def add(a,b):
"""
>>> add(1,3)
3
>>> add('a','b')
'ab'
"""
return a+b
| true |
fbceeaa4a908005f0835d675f2985325dd4cc145 | Python | cuiods/Coding | /Python/Course/unit2/pie.py | UTF-8 | 284 | 3.03125 | 3 | [] | no_license | import matplotlib.pyplot as plt
labels = "Frogs","Hogs","Dogs","Logs"
sizes = [15,30,45,10]
colors = ['yellowgreen','gold','lightskyblue','lightcoral']
explode = (0,0.1,0,0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%')
plt.axis('equal')
plt.show() | true |
9ae5e1c04724deaf35efb54e58c42d7cd80a0d57 | Python | zerosum99/python_basic | /myPython/time_date/datetime_date.py | UTF-8 | 1,065 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 03 14:14:15 2016
@author: 06411
"""
import time
from datetime import datetime
from datetime import date
from datetime import timedelta
def date_call() :
da_to = date.today()
print " date min ", date.min, type(date.min)
print " data max ", date.max
print " date resolution ", date.resolution, type(date.resolution)
print "today :", da_to
print " strftime :", da_to.strftime("%y %m %d")
print " timestamp :", date.fromtimestamp(time.time())
td = date.today() - date(1,1,1)
print "time delta julian :", type(td), td.days
print " ordinal : ", date.fromordinal(td.days+1)
print type(timedelta.max), timedelta.min
print " min ordinal : ", timedelta.min.days
print " max ordinal : ", timedelta.max.days
print " datetiem max : ", datetime.max.date()
td_max = datetime.max.date() - date(1,1,1)
print " max julian : ", td_max.days
print " max ordinal : ", date.fromordinal(td_max.days+1)
if __name__ == "__main__" :
date_call() | true |
c91b010e80c4a333ed2eb3f7b5102d709a2bf355 | Python | alfonsof/music-list | /musiclist.py | UTF-8 | 4,630 | 3.0625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
# musiclist.py
# Main application of musiclist tool.
# This utility allows to read a music structure in your file system,
# and create a file with the information in several formats.
import os
import sys
import argparse
from musicmod import createlist
from musicmod import viewlist
def is_dir(string):
    """Validate a directory argument (used as argparse ``type=``).

    An empty string falls back to the current working directory; an
    existing directory is returned unchanged; anything else prints an
    error and terminates the program with exit status 1.
    """
    if not string:    # no --path given: default to the current directory
        return os.getcwd()
    if os.path.isdir(string):
        return string
    print('Error, directory \'' + string + '\' does not exist')
    sys.exit(1)
def main():
    """Parse command-line options and run the requested music-list actions.

    With no arguments a usage summary is printed.  Any of the write
    options (-p/-f/-d/-c/-j/-j2/-x/-x2/--html) first loads the music
    information from --path (empty default resolves to the current
    directory via is_dir); the *view options read back an already
    written file via the viewlist module.
    """
    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print('\nusage: musiclist.py [-h] [--path MUSIC_DIR] [-p]\n' + \
            ' [-f FILE_NAME] [-d DB_NAME] [-c CSV_NAME]\n' + \
            ' [-j JSON_NAME] [-j2 JSON_NAME]\n' + \
            ' [-x XML_NAME] [-x2 XML_NAME]\n' + \
            ' [--html HTML_NAME]\n' + \
            ' [--dbview DB_VIEW] [--csvview CSV_VIEW]\n' + \
            ' [--jsonview JSON_VIEW] [--xmlview XML_VIEW]\n' + \
            ' [--htmlview HTML_VIEW]')
        return
    # One argparse option per supported output/input format.
    parser = argparse.ArgumentParser(description='Manage music list')
    parser.add_argument('--path', type=is_dir, action='store', default='', dest="music_dir", help='directory where the music is')
    parser.add_argument('-p', '--print', action='store_true', default=False, dest='printlist', help='print music list')
    parser.add_argument('-f', '--file', action='store', dest="file_name", help='write music list to a text file')
    parser.add_argument('-d', '--db', action='store', dest="db_name", help='write music list to SQLite Database')
    parser.add_argument('-c', '--csv', action='store', dest="csv_name", help='write music list to a CSV file')
    parser.add_argument('-j', '--json', action='store', dest="json_name", help='write music list (music list) to a JSON file')
    parser.add_argument('-j2', '--json2', action='store', dest="json_name2", help='write music list (tracks list) to a JSON file')
    parser.add_argument('-x', '--xml', action='store', dest="xml_name", help='write music list (music list) to an XML file')
    parser.add_argument('-x2', '--xml2', action='store', dest="xml_name2", help='write music list (tracks list) to an XML file')
    parser.add_argument('--html', action='store', dest="html_name", help='write music list to an HTML file')
    parser.add_argument('--dbview', action='store', dest="db_view", help='view music list from a SQLite Database')
    parser.add_argument('--csvview', action='store', dest="csv_view", help='view music list from a CSV file')
    parser.add_argument('--jsonview', action='store', dest="json_view", help='view music list from a JSON file')
    parser.add_argument('--xmlview', action='store', dest="xml_view", help='view music list from an XML file')
    parser.add_argument('--htmlview', action='store', dest="html_view", help='view music list from an HTML file')
    args = parser.parse_args()
    # Load music information from the directory
    if args.printlist or args.file_name or args.db_name or args.csv_name or \
        args.json_name or args.json_name2 or \
        args.xml_name or args.xml_name2 or \
        args.html_name:
        createlist.load_music_list(args.music_dir)
    # Execute options
    if args.printlist:
        createlist.print_list()
    if args.file_name:
        createlist.file_list(args.file_name)
    if args.db_name:
        createlist.db_list(args.db_name)
    if args.csv_name:
        createlist.csv_list(args.csv_name)
    if args.json_name:
        createlist.json_list_music(args.json_name)
    if args.json_name2:
        createlist.json_list_tracks(args.json_name2)
    if args.xml_name:
        createlist.xml_list_music(args.xml_name)
    if args.xml_name2:
        createlist.xml_list_tracks(args.xml_name2)
    if args.html_name:
        createlist.html_list(args.html_name)
    if args.db_view:
        viewlist.db_list(args.db_view)
    if args.csv_view:
        viewlist.csv_list(args.csv_view)
    if args.json_view:
        viewlist.json_list(args.json_view)
    if args.xml_view:
        viewlist.xml_list(args.xml_view)
    if args.html_view:
        viewlist.html_list(args.html_view)
    return
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
    main()
| true |
56536caeca46010e344d75e933d157d2b8bbbd46 | Python | lsroudi/pythonTraining | /quickTour/function.py | UTF-8 | 195 | 3.71875 | 4 | [
"MIT"
] | permissive | def fibonacci(n):
a,b = 0,1
if(n==a):
return a
if(n==b):
return b
return fibonacci(n-1)+fibonacci(n-2)
# Print the first ten Fibonacci numbers, F(0) through F(9).
for n in range(0,10):
    print(fibonacci(n))
| true |
a195ec6ea2283840ddb79d1a00f41a17ed4b71e7 | Python | yifr/V1-behavior-neurons | /scripts/data_testing.py | UTF-8 | 2,008 | 2.640625 | 3 | [] | no_license | import os
import h5py
from scipy.io import loadmat
from behavenet import get_user_dir
def test_cell_type_subsamples(session='1', animal='MD0ST5', lab='dipoppa', expt='full_trial',
                              sample='sst85_sample_0', cell_id=3):
    """Check that the HDF5 cell-type subsample `sample` matches the good
    cells of type `cell_id` listed in the .mat metadata file.

    Silently returns when the session's data.hdf5 is absent.
    """
    data_dir = get_user_dir('data')
    path = os.path.join(data_dir, lab, expt, animal, session, 'data.hdf5')
    if not os.path.exists(path):
        print(path, ' does not exist.')
        return
    # Get cell types (Need to change this for different mice)
    # NOTE(review): the .mat path is hard-coded for MD0ST5 and does not
    # follow the `animal` argument -- confirm before reusing elsewhere.
    info_cells = loadmat('neural_dir/info_cells_MD0ST5_2018-04-04.mat')['info_cells']
    cell_type_idxs = info_cells[0][0][0][0] # 0 = excitatory, 3 = inhibitory
    good_cell_idxs = info_cells[0][0][1][0]
    # Label all "bad" cells as -1
    cell_type_idxs = [cell_type_idxs[i] if good_cell_idxs[i] == 1 else -1 for i in range(len(good_cell_idxs))]
    def select_idxs(arr, key):
        # Indices of the entries equal to `key`.
        return [i for i in range(len(arr)) if arr[i] == key]
    idxs = select_idxs(cell_type_idxs, cell_id)
    data = h5py.File(path, 'r', libver='latest', swmr=True)
    cell_types = data['samples']['cell_types']
    session = cell_types[sample]
    # Correct number of indexes
    assert len(session[:]) == len(idxs)
    # No repeats
    assert len(set(session[:])) == len(idxs)
    data.close()
    print('Cell Type Subsample: Success')
def test_subsamples(session='1', animal='MD0ST5', lab='dipoppa', expt='full_trial',
                    sample='sample_100_t0', cell_id=3):
    """Check that the HDF5 subsample `sample` contains no duplicate indices.

    `cell_id` is accepted for signature symmetry with
    test_cell_type_subsamples but is unused here.  Silently returns when
    the session's data.hdf5 is absent.
    """
    data_dir = get_user_dir('data')
    path = os.path.join(data_dir, lab, expt, animal, session, 'data.hdf5')
    if not os.path.exists(path):
        print(path, ' does not exist.')
        return
    data = h5py.File(path, 'r', libver='latest', swmr=True)
    subsamples = data['samples']['subsamples']
    session = subsamples[sample]
    # No duplicates
    assert len(set(session[:])) == len(session[:])
    print('Subsample Test: Success')
    data.close()
# Run both integrity checks when this script is executed/imported.
test_subsamples()
test_cell_type_subsamples()
| true |
b7d8d079c17afae1690e988054aa696d5cdab6a5 | Python | Codeducate/codeducate.github.io | /students/python-projects-2016/chada_patel.py | UTF-8 | 4,760 | 3.78125 | 4 | [] | no_license |
#the first input is mathematics, then science, english, your language
#course (spanish,french,latin,german) and history -- five integer grades.
a = int(input())
b = int(input())
c = int(input())
d = int(input())
e = int(input())

my_average = ((a + b + c + d + e) / 5)


def _feedback(score, messages, resource):
    """Print the feedback line for one subject, tagged with "-<score>".

    `messages` holds the six texts for the bands >=98, >=93, >=90, >=88,
    >=83 and >=80 (in that order); `resource` is the study link printed
    for scores below 80 (the original output format adds an extra space
    before the "-<score>" tag in that case, preserved here).
    """
    tag = "-" + str(score)
    for limit, text in zip((98, 93, 90, 88, 83, 80), messages):
        if score >= limit:
            print(text + tag)
            return
    print(resource + " " + tag)


# Band texts shared by every subject.
STUDY_HARDER = "Come on, you just need to study harder and bump your grade up to an A"
STUDY_EVERY_TEST = "From now on you need to study for every test"

# (score, per-band messages, below-80 study resource) for each subject.
SUBJECTS = (
    (a,
     ("You are doing great in math!",
      "You are doing good in math",
      "You are on the right path in math, but you can do better!",
      STUDY_HARDER,
      "Come on I know you can step up your grade in math",
      STUDY_EVERY_TEST),
     "go to this site and start helping yourself: http://www.classzone.com/cz/books/algebra_1_2007_na/book_home.htm?state=VA"),
    (b,
     ("You are doing great in science!",
      "You are doing good in science",
      "You are on the right path in science, but you can do better!",
      STUDY_HARDER,
      "Come on I know you can step up your grade in science",
      STUDY_EVERY_TEST),
     "http://www.lcps.org/cms/lib4/VA01000195/Centricity/Domain/4996/Notes%20Book%20Semester%201%20Characteristics-Genetics.pdf"),
    (c,
     ("You are doing great in English!",
      "You are doing good in English",
      "You are on the right path in English, but you can do better!",
      STUDY_HARDER,
      "Come on I know you can step up your grade in English",
      STUDY_EVERY_TEST),
     "http://interactivesites.weebly.com/language-arts.html"),
    (d,
     ("You are doing great in your language course!",
      "You are doing good in your language course",
      "You are on the right path in your language course, but you can do better!",
      STUDY_HARDER,
      # fix: the original copy-pasted "English" into this message
      "Come on I know you can step up your grade in your language course",
      STUDY_EVERY_TEST),
     "https://quizlet.com/"),
    (e,
     ("You are doing great in history!",
      "You are doing good in history",
      "You are on the right path in history, but you can do better!",
      STUDY_HARDER,
      "Come on I know you can step up your grade in history",
      "From now on you need to study for every test in history"),
     "http://americanhistory.abc-clio.com/"),
)

for score, messages, resource in SUBJECTS:
    _feedback(score, messages, resource)

# Overall feedback on the average grade (no "-<score>" tag, as before).
if my_average >= 98:
    print("You are doing great in school!")
elif my_average >= 93:
    print("You are doing good in school")
elif my_average >= 90:
    print("You are on the right path in school, but you can do better!")
elif my_average >= 88:
    print("Come on, you just need to study harder and bump your average grade into an A in school")
elif my_average >= 83:
    print("Come on I know you can step up your grade in school")
elif my_average >= 80:
    print("From now on you need to study for every test in school")
else:
    print("You need to give more importance to school:(")
print(str(my_average) + " is your average grade in school")
5bbbac5755318e5d8c73af35e2e13faf63587b7d | Python | Whitehouse112/Paintings-Detection | /painting_rectification.py | UTF-8 | 7,476 | 2.6875 | 3 | [] | no_license | import numpy as np
import cv2
# Running-average state for the camera focal length estimated by
# compute_aspect_ratio() (values in pixels; seeded so the average starts
# at 1000 before any real estimate is accepted).
f_tot = 1000
num_f = 1
focal_length = 1000
def find_intersections(lines):
    """Intersect near-horizontal with near-vertical Hough lines.

    Each element of `lines` is [[rho, theta]] describing
    x*cos(theta) + y*sin(theta) = rho.  Lines within 45 degrees of
    theta = 0 (mod 180) go into one group, the rest into the other, and
    every cross-group pair is solved for its intersection point.
    Returns a float32 array of shape (N, 1, 2).
    """
    near_horiz = []
    near_vert = []
    for line in lines:
        rho, theta = line[0]  # x*cos(th) + y*sin(th) = rho
        angle = (theta * 360) / (2 * np.pi)
        if -45 <= angle < 45 or 135 <= angle < 225:
            near_horiz.append(line)
        else:
            near_vert.append(line)

    points = []
    for h_line in near_horiz:
        for v_line in near_vert:
            rho1, theta1 = h_line[0]
            rho2, theta2 = v_line[0]
            # Solve the 2x2 linear system formed by the two line equations.
            coeffs = np.array([
                [np.cos(theta1), np.sin(theta1)],
                [np.cos(theta2), np.sin(theta2)]
            ])
            rhos = np.array([[rho1], [rho2]])
            x0, y0 = np.linalg.solve(coeffs, rhos)
            points.append([[x0[0], y0[0]]])

    return np.array(points, dtype=np.float32)
def vertices_kmeans(intersections):
    """Cluster the intersection points into 4 centroids (the quad vertices).

    Returns a (4, 1, 2) float32 array; the centroid order is whatever
    cv2.kmeans produces (sort with order_centers afterwards).
    """
    # Stop after 10 iterations or when movement < 1.0; 10 random restarts.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, _, centers = cv2.kmeans(intersections, 4, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    centers = np.expand_dims(np.array(centers, dtype=np.float32), axis=1)
    return centers
def check_vertices(tl, tr, bl, br, frame_h, frame_w):
    """Clamp the four quad vertices to the frame with a 200 px outer margin.

    Each corner may extend up to 200 px beyond the frame only on its own
    side (e.g. the top-left corner may go 200 px above/left).  The input
    points are modified in place and also returned as a tuple.
    """
    margin = 200
    bounds = (
        (tl, -margin, frame_w, -margin, frame_h),
        (tr, 0, frame_w + margin, -margin, frame_h),
        (bl, -margin, frame_w, 0, frame_h + margin),
        (br, 0, frame_w + margin, 0, frame_h + margin),
    )
    for vertex, x_lo, x_hi, y_lo, y_hi in bounds:
        vertex[0] = np.clip(vertex[0], x_lo, x_hi)
        vertex[1] = np.clip(vertex[1], y_lo, y_hi)
    return tl, tr, bl, br
def order_centers(centers):
    """Order 4 cluster centers as (top-left, top-right, bottom-left, bottom-right).

    `centers` is a (4, 1, 2) array of (x, y) points.  The sort happens
    on a structured view of the same buffer, so the caller's array is
    reordered in place as a side effect -- presumably acceptable here,
    but verify if the input is reused afterwards.
    """
    # View the flat (x, y) pairs as records so sort() can key on a field.
    dtype = [('x', centers.dtype), ('y', centers.dtype)]
    centers = centers.ravel().view(dtype)
    # Split into the two left-most and two right-most points by x ...
    centers.sort(order=['x'])
    left_most = centers[:2]
    right_most = centers[2:]
    # ... then order each pair by y (smaller y = top).
    left_most.sort(order=['y'])
    tl, bl = left_most
    tl = np.array(list(tl), dtype=np.float32)
    bl = np.array(list(bl), dtype=np.float32)
    right_most.sort(order=['y'])
    tr, br = right_most
    tr = np.array(list(tr), dtype=np.float32)
    br = np.array(list(br), dtype=np.float32)
    return tl, tr, bl, br
def compute_aspect_ratio(tl, tr, bl, br, frame_shape):
    """
    Estimate the real (unwarped) height and width of the quadrilateral
    (tl, tr, bl, br) as seen in a frame of shape ``frame_shape``.

    Algorithm from:
    Whiteboard scanning and image enhancement
    Zhengyou Zhang, Li-Wei He
    https://www.microsoft.com/en-us/research/uploads/prod/2016/11/Digital-Signal-Processing.pdf

    Side effect: refines the module-level running average of the camera focal
    length (f_tot, num_f, focal_length) whenever a plausible estimate is found.

    Returns (h, w) as ints.
    """
    # Visible extents of the quadrilateral (left/right heights, top/bottom widths).
    h1 = bl[1] - tl[1]
    h2 = br[1] - tr[1]
    w1 = tr[0] - tl[0]
    w2 = br[0] - bl[0]
    h = max(h1, h2)
    w = max(w1, w2)
    # image center
    u0 = frame_shape[1] / 2
    v0 = frame_shape[0] / 2
    ar_vis = w / h  # visible aspect ratio
    # Corners in homogeneous coordinates.
    m1 = np.append(tl, 1)
    m2 = np.append(tr, 1)
    m3 = np.append(bl, 1)
    m4 = np.append(br, 1)
    # cross product = prodotto vettoriale
    # dot product = prodotto scalare
    # Projective coefficients and vanishing-direction vectors (paper eqs. 11-12).
    k2 = np.dot(np.cross(m1, m4), m3) / np.dot(np.cross(m2, m4), m3)
    k3 = np.dot(np.cross(m1, m4), m2) / np.dot(np.cross(m3, m4), m2)
    n2 = k2 * m2 - m1
    n3 = k3 * m3 - m1
    n21, n22, n23 = n2
    n31, n32, n33 = n3
    if n23 != 0 and n33 != 0:
        # Closed-form focal-length estimate (paper eq. 21).
        f_squared = -((1 / (n23 * n33)) * ((n21 * n31 - (n21 * n33 + n23 * n31) * u0 + n23 * n33 * (u0 ** 2)) + (
                n22 * n32 - (n22 * n33 + n23 * n32) * v0 + n23 * n33 * (v0 ** 2))))
        global focal_length, f_tot, num_f
        # Only fold plausible estimates (f < 2000px) into the running average,
        # and stop updating after 300 samples.
        if 0 < f_squared < 2000 ** 2 and num_f < 300:
            f = np.sqrt(f_squared)  # focal length in pixels
            f_tot += f
            num_f += 1
            focal_length = f_tot / num_f
        f = focal_length
        # Camera intrinsics matrix built from the averaged focal length.
        A = np.array([[f, 0, u0], [0, f, v0], [0, 0, 1]], dtype=np.float32)
        At = np.transpose(A)
        Ati = np.linalg.inv(At)
        Ai = np.linalg.inv(A)
        # calculate the real aspect ratio
        ar_real = np.sqrt(np.dot(np.dot(np.dot(n2, Ati), Ai), n2) / np.dot(np.dot(np.dot(n3, Ati), Ai), n3))
    else:
        # Degenerate case (quadrilateral is a parallelogram): ratio of the
        # direction-vector norms suffices.
        ar_real = np.sqrt((n21 ** 2 + n22 ** 2) / (n31 ** 2 + n32 ** 2))
    # Keep the larger visible extent and derive the other from the real ratio.
    if ar_real < ar_vis:
        w = int(w)
        h = int(w / ar_real)
    else:
        h = int(h)
        w = int(ar_real * h)
    return h, w
def rectify_paintings(roi_list, cont_list, frame):
    """Rectify each detected painting in ``frame`` to a fronto-parallel view.

    For every contour: approximate it with a polygon, detect its edges with a
    Hough transform, derive four corner vertices (intersections + k-means),
    estimate the true aspect ratio, and warp the painting accordingly.
    Contours with no detectable lines or fewer than 4 intersections are
    dropped; contours whose vertices look implausible fall back to the plain
    bounding-box crop.

    Returns (rectified_images, kept_rois, kept_contours), aligned by index.
    """
    new_roi_list = []
    new_cont_list = []
    rectified = []
    # img_lines = np.zeros_like(frame)
    for idx, contour in enumerate(cont_list):
        # Polygonal approximation
        epsilon = 0.02 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)
        # Draw only the approximated polygon onto a black canvas so the Hough
        # transform sees just the painting's border.
        img_poly = np.zeros_like(frame)
        cv2.drawContours(img_poly, [approx], -1, (0, 255, 0), thickness=5)
        img_poly = cv2.cvtColor(img_poly, cv2.COLOR_BGR2GRAY)
        # Finding lines with Hough transform
        lines = cv2.HoughLines(img_poly, 1.3, np.pi / 180, 150)
        if lines is None:
            continue
        # Lines intersections
        intersections = find_intersections(lines)
        if len(intersections) < 4:
            continue
        # Average vertices with K-Means
        vertices = vertices_kmeans(intersections)
        # Ordering vertices
        tl, tr, bl, br = order_centers(vertices)
        tl, tr, bl, br = check_vertices(tl, tr, bl, br, frame.shape[0], frame.shape[1])
        frame_h, frame_w = frame.shape[0:2]
        # Sanity check: every side of the quadrilateral must be between 100px
        # and the frame extent, otherwise fall back to the bounding-box crop.
        hmax, hmin = max(bl[1] - tl[1], br[1] - tr[1]), min(bl[1] - tl[1], br[1] - tr[1])
        wmax, wmin = max(tr[0] - tl[0], br[0] - bl[0]), min(tr[0] - tl[0], br[0] - bl[0])
        if not(100 <= hmax <= frame_h and 100 <= hmin <= frame_h and 100 <= wmax <= frame_w and 100 <= wmin <= frame_w):
            x, y, w, h = roi_list[idx]
            img = frame[y:y + h, x:x + w]
            rectified.append(img)
            new_roi_list.append(roi_list[idx])
            new_cont_list.append(cont_list[idx])
            continue
        # Compute aspect-ratio
        h, w = compute_aspect_ratio(tl, tr, bl, br, frame.shape)
        # Warp perspective
        pts_src = np.array([[0, 0], [w, 0], [0, h], [w, h]], dtype=np.float32)
        pts_dst = np.array([tl, tr, bl, br], dtype=np.float32)
        m, _ = cv2.findHomography(pts_src, pts_dst, method=cv2.RANSAC)
        warped = cv2.warpPerspective(frame, m, (w, h), flags=cv2.WARP_INVERSE_MAP)
        rectified.append(warped)
        new_roi_list.append(roi_list[idx])
        new_cont_list.append(cont_list[idx])
        # draw_lines(img_lines, approx, lines, vertices)
        # cv2.namedWindow("Lines", flags=cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        # cv2.imshow("Lines", cv2.resize(img_lines, (1280, 720)))
    return rectified, new_roi_list, new_cont_list
def draw_lines(img_lines, contour, lines, vertices=None):
    """Debug helper: draw a contour, its Hough lines, and optional vertices.

    NOTE(review): ``img_lines`` is immediately rebound to a fresh black image,
    so the caller's buffer is never modified, and the display calls at the end
    are commented out -- as currently written this function has no observable
    effect outside its own scope.
    """
    img_lines = np.zeros_like(img_lines)
    cv2.drawContours(img_lines, [contour], -1, (255, 0, 0), thickness=2)
    for line in lines:
        # Convert each (rho, theta) line into two far-apart endpoints.
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 2000 * (-b))
        y1 = int(y0 + 2000 * a)
        x2 = int(x0 - 2000 * (-b))
        y2 = int(y0 - 2000 * a)
        cv2.line(img_lines, (x1, y1), (x2, y2), (0, 255, 0), thickness=1)
    if vertices is not None:
        cv2.drawContours(img_lines, np.array(vertices, dtype=np.int), -1, (0, 0, 255), thickness=5)
    # cv2.namedWindow("Lines", flags=cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
    # cv2.imshow("Lines", cv2.resize(img_lines, (1280, 720)))
| true |
1ca927e8b116c29d34c431263d56aad229b8ee77 | Python | mojtabazahedi/News-Recommendation-System | /newstweets.py | UTF-8 | 629 | 2.546875 | 3 | [] | no_license | __author__ = 'Macroboy'
import re
import sqlite3
########################################################
# Copy news texts from the 'news' table into 'textonly',
# stripping URLs along the way.
# NOTE(review): this looks like Python 2 era code -- on Python 3,
# re.sub with a str pattern would fail on the bytes produced by
# .encode() below; confirm the target interpreter.
db = sqlite3.connect('NewsData.db')
c = db.cursor()
########################################################
c.execute('select * from news')
rows = c.fetchall()
for row in rows:
    rawtext = row[1]  # presumably the tweet/news text column -- verify schema
    id=str(row[2])    # presumably the record id column -- verify schema
    # Drop non-ASCII characters, then remove every http(s) URL token.
    text = rawtext.encode('ascii', 'ignore')
    url = re.sub(r"http\S+", "", text)  # NOTE: holds the URL-free text, not a URL
    c.execute('''INSERT INTO textonly ( text,id) VALUES(?,?)''', (url ,id))
    #print rawtext,
    #report = c.execute("UPDATE tweets set text= ? WHERE ID= ?", (str(url), str(id)))
    #print c.rowcount
# Commit once after the whole loop.
db.commit()
| true |
de8ecd252c8cecee8a8745f1d7bfc8fa860b8227 | Python | lllillly/python_chatting_ass1 | /server.py | UTF-8 | 1,621 | 3.21875 | 3 | [] | no_license | from twisted.internet import protocol, reactor
import names
from colorama import Fore, Back
# 어떤 사용자가 보낸 메시지를 다른 사용자에게 전달
transports = set() # 클라이언트를 저장할 변수
users = set() # 사용자의 이름을 저장할 변수
COLORS = [
"\033[31m", # RED
"\033[32m", # GREEN
"\033[33m", # YELLOW
"\033[34m", # BLUE
"\033[35m", # MAGENTA
"\033[36m", # CYAN
"\033[37m", # WHITE
"\033[4m", # UNDERLINE
]
class Chat(protocol.Protocol):
    """One protocol instance per connected chat client.

    On connect the client gets a random, colorized name; afterwards every
    message it sends is relayed to all other connected clients.
    """
    def connectionMade(self):
        # self.transport.write("connected!".encode())
        # (disabled) send a plain "connected" notice to the client
        name = names.get_first_name()  # generate a random first name for this user
        color = COLORS[len(users) % len(COLORS)]
        # pick the next color, round-robin over COLORS
        users.add(name)  # record the assigned name
        transports.add(self.transport)  # register this client's transport for broadcasting
        self.transport.write(f"{color}{name}\033[0m".encode())  # send the colorized name to the client
        print(f"현재 접속자 수 : {len(users)}명")
    def dataReceived(self, data):
        for t in transports:  # for every connected client:
            if self.transport is not t:  # skip the sender itself
                t.write(data)  # relay the message
        print(data.decode("utf-8"))
class ChatFactory(protocol.Factory):
    """Twisted factory that builds one Chat protocol per incoming connection."""
    def buildProtocol(self, addr):
        return Chat()
# Listen on TCP port 8000 and run the reactor loop (blocks forever).
print("🔒 Server Started!")
reactor.listenTCP(8000, ChatFactory())
reactor.run()
| true |
e812c06489cc898cf70e358d678e364fa6f7359a | Python | kspra3/Algorithms-and-Programming-Fundamentals | /Workshop11/Task2A.py | UTF-8 | 865 | 3.96875 | 4 | [] | no_license | import random
import timeit
def power1(x, n):
    """Return x raised to the power n by repeated multiplication, O(n).

    For n <= 0 the loop body never runs and 1 is returned.
    """
    result = 1
    for _ in range(n):
        result = result * x
    return result
def power2(x,n):
    """Return x raised to the power n by recursive squaring, O(log n) multiplies.

    For n <= 0 the recursion bottoms out and 1 is returned.
    """
    if n <= 0:
        return 1
    half = power2(x, n // 2)
    return half * half if n % 2 == 0 else half * half * x
# NOTE: the bare string literals below act as informal comments; they are
# no-op expression statements and have no runtime effect.
'generate random integers for x and n'
x = random.randrange(1,100)
n = random.randrange(1,100)
'start timing power1'
start = timeit.default_timer()  # wall-clock start for the O(n) version
power1(x,n)
'finish timing'
print("Time taken by power1 was: " + str(timeit.default_timer()-start) + " seconds.")
'start timing power2'
start = timeit.default_timer()  # wall-clock start for the O(log n) version
power2(x,n)
'finish timing power2'
print("Time taken by power2 was: " + str(timeit.default_timer()-start) + " seconds.")
| true |
132c016cb41fc21e2ccf7fe5ad087edba3bf0d4f | Python | jasonbrackman/writing_a_compiler | /compilers/gone/llvmgen.py | UTF-8 | 16,749 | 3.375 | 3 | [] | no_license | # gone/llvmgen.py
"""
Project 5 : Generate LLVM
=========================
In this project, you're going to translate the SSA intermediate code
into LLVM IR. Once you're done, your code will be runnable. It
is strongly advised that you do *all* of the steps of Exercise 5
prior to starting this project. Don't rush into it.
For Project 5, you are going to emit all of the LLVM instructions into
a single function main(). This is a temporary shim to get things to
work before we implement further support for user-defined functions in
Project 8.
Further instructions are contained in the comments below.
"""
# LLVM imports. Don't change this.
from llvmlite.ir import (
Module, IRBuilder, Function, IntType, DoubleType, VoidType, Constant, GlobalVariable,
FunctionType
)
# Declare the LLVM type objects that you want to use for the low-level
# in our intermediate code.  Basically, you're going to need to
# declare the integer, float, and string types here.  These correspond
# to the types being used the intermediate code being created by
# the ircode.py file.
int_type = IntType(32)         # 32-bit integer
float_type = DoubleType()      # 64-bit float
bool_type = IntType(1)         # 1-bit integer (bool)
string_type = None             # Up to you (leave until the end)
void_type = VoidType()         # Void type. This is a special type
                               # used for internal functions returning
                               # no value
# A dictionary that maps the typenames used in IR to the corresponding
# LLVM types defined above. This is mainly provided for convenience
# so you can quickly look up the type object given its type name.
# NOTE(review): 'string' maps to None until string_type is implemented,
# so any extern declaration using 'string' will fail -- intentional for now.
typemap = {
    'int': int_type,
    'float': float_type,
    'string': string_type,
    'bool': bool_type
}
# The following class is going to generate the LLVM instruction stream.
# The basic features of this class are going to mirror the experiments
# you tried in Exercise 5. The execution model is somewhat similar
# to the visitor class.
#
# Given a sequence of instruction tuples such as this:
#
# code = [
# ('literal_int', 1, '_int_1'),
# ('literal_int', 2, '_int_2'),
# ('add_int', '_int_1', '_int_2, '_int_3')
# ('print_int', '_int_3')
# ...
# ]
#
# The class executes methods self.emit_opcode(args). For example:
#
# self.emit_literal_int(1, '_int_1')
# self.emit_literal_int(2, '_int_2')
# self.emit_add_int('_int_1', '_int_2', '_int_3')
# self.emit_print_int('_int_3')
#
# Internally, you'll need to track variables, constants and other
# objects being created. Use a Python dictionary to emulate
# storage.
class GenerateLLVM(object):
    """Translate SSA intermediate-code tuples into LLVM IR.

    Each IR tuple ('opcode', *args) is dispatched to a matching
    emit_<opcode>(*args) method; unknown opcodes produce a warning.
    All code is currently emitted into a single function ``void main()``
    (to be generalized for user-defined functions in Projects 7/8).
    """
    def __init__(self, name='module'):
        # Perform the basic LLVM initialization. You need the following parts:
        #
        # 1. A top-level Module object
        # 2. A Function instance in which to insert code
        # 3. A Builder instance to generate instructions
        #
        # Note: For project 5, we don't have any user-defined
        # functions so we're just going to emit all LLVM code into a top
        # level function void main() { ... }.  This will get changed later.
        self.module = Module(name)
        self.function = Function(self.module,
                                 FunctionType(void_type, []),
                                 name='main')
        self.block = self.function.append_basic_block('entry')
        self.builder = IRBuilder(self.block)
        # Dictionary that holds all of the global variable/function declarations.
        # Any declaration in the Gone source code is going to get an entry here
        self.vars = {}
        # Dictionary that holds all of the temporary variables created in
        # the intermediate code.   For example, if you had an expression
        # like this:
        #
        #      a = b + c*d
        #
        # The corresponding intermediate code might look like this:
        #
        #      ('load_int', 'b', 'int_1')
        #      ('load_int', 'c', 'int_2')
        #      ('load_int', 'd', 'int_3')
        #      ('mul_int', 'int_2','int_3','int_4')
        #      ('add_int', 'int_1','int_4','int_5')
        #      ('store_int', 'int_5', 'a')
        #
        # The self.temp dictionary below is used to map names such as 'int_1',
        # 'int_2' to their corresponding LLVM values.  Essentially, every time
        # you make anything in LLVM, it gets stored here.
        self.temps = {}
        # Initialize the runtime library functions (see below)
        self.declare_runtime_library()

    def declare_runtime_library(self):
        """Declare the external C runtime functions (implemented in gonert.c)."""
        # Certain functions such as I/O and string handling are often easier
        # to implement in an external C library.  This method should make
        # the LLVM declarations for any runtime functions to be used
        # during code generation.  Please note that runtime function
        # functions are implemented in C in a separate file gonert.c
        self.runtime = {}
        # Declare printing functions
        self.runtime['_print_int'] = Function(self.module,
                                              FunctionType(void_type, [int_type]),
                                              name="_print_int")
        self.runtime['_print_float'] = Function(self.module,
                                                FunctionType(void_type, [float_type]),
                                                name="_print_float")
        self.runtime['_print_bool'] = Function(self.module,
                                               FunctionType(void_type, [int_type]),
                                               name="_print_bool")

    def generate_code(self, ircode):
        """Emit LLVM for a sequence of SSA tuples, dispatching to emit_* methods."""
        # Given a sequence of SSA intermediate code tuples, generate LLVM
        # instructions using the current builder (self.builder).  Each
        # opcode tuple (opcode, args) is dispatched to a method of the
        # form self.emit_opcode(args)
        # Gather all of the block labels
        labels = [op[1] for op in ircode if op[0] == 'block']
        # Make a dict of LLVM block objects (in advance!!!)
        self.blocks = {label: self.function.append_basic_block(label)
                       for label in labels}
        for opcode, *args in ircode:
            if hasattr(self, 'emit_' + opcode):
                getattr(self, 'emit_' + opcode)(*args)
            else:
                print('Warning: No emit_' + opcode + '() method')
        # Add a return statement.  Note, at this point, we don't really have
        # user-defined functions so this is a bit of hack--it may be removed later.
        self.builder.ret_void()

    # ----------------------------------------------------------------------
    # Opcode implementation.   You must implement the opcodes.  A few
    # sample opcodes have been given to get you started.
    # ----------------------------------------------------------------------

    # Creation of literal values.  Simply define as LLVM constants.
    def emit_literal_int(self, value, target):
        self.temps[target] = Constant(int_type, value)

    def emit_literal_float(self, value, target):
        self.temps[target] = Constant(float_type, value)

    def emit_literal_bool(self, value, target):
        self.temps[target] = Constant(bool_type, value)

    # STRINGS BONUS: Nightmare scenarios :) --
    # def emit_literal_string(self, value, target):
    #     self.temps[target] = Constant(string_type, value)

    # Allocation of variables.  Declare as global variables and set to
    # a sensible initial value.
    def emit_alloc_int(self, name):
        var = GlobalVariable(self.module, int_type, name=name)
        var.initializer = Constant(int_type, 0)
        self.vars[name] = var

    def emit_alloc_float(self, name):
        var = GlobalVariable(self.module, float_type, name=name)
        var.initializer = Constant(float_type, 0)
        self.vars[name] = var

    def emit_alloc_bool(self, name):
        var = GlobalVariable(self.module, bool_type, name=name)
        var.initializer = Constant(bool_type, 0)
        self.vars[name] = var

    # Load/store instructions for variables.  Load needs to pull a
    # value from a global variable and store in a temporary. Store
    # goes in the opposite direction.
    def emit_load_int(self, name, target):
        self.temps[target] = self.builder.load(self.vars[name], target)

    def emit_load_float(self, name, target):
        self.temps[target] = self.builder.load(self.vars[name], target)

    def emit_load_bool(self, name, target):
        self.temps[target] = self.builder.load(self.vars[name], target)

    def emit_store_int(self, source, target):
        self.builder.store(self.temps[source], self.vars[target])

    def emit_store_float(self, source, target):
        self.builder.store(self.temps[source], self.vars[target])

    def emit_store_bool(self, source, target):
        self.builder.store(self.temps[source], self.vars[target])

    # Binary + operator
    def emit_add_int(self, left, right, target):
        self.temps[target] = self.builder.add(self.temps[left], self.temps[right], target)

    def emit_add_float(self, left, right, target):
        self.temps[target] = self.builder.fadd(self.temps[left], self.temps[right], target)

    # Binary - operator
    def emit_sub_int(self, left, right, target):
        self.temps[target] = self.builder.sub(self.temps[left], self.temps[right], target)  # You must implement

    def emit_sub_float(self, left, right, target):
        self.temps[target] = self.builder.fsub(self.temps[left], self.temps[right], target)

    # Binary * operator
    def emit_mul_int(self, left, right, target):
        self.temps[target] = self.builder.mul(self.temps[left], self.temps[right], target)

    def emit_mul_float(self, left, right, target):
        self.temps[target] = self.builder.fmul(self.temps[left], self.temps[right], target)

    # Binary / operator
    def emit_div_int(self, left, right, target):
        self.temps[target] = self.builder.sdiv(self.temps[left], self.temps[right], target)

    def emit_div_float(self, left, right, target):
        self.temps[target] = self.builder.fdiv(self.temps[left], self.temps[right], target)

    # Unary + operator (implemented as 0 + x)
    def emit_uadd_int(self, source, target):
        self.temps[target] = self.builder.add(Constant(int_type, 0),
                                              self.temps[source],
                                              target)

    def emit_uadd_float(self, source, target):
        self.temps[target] = self.builder.fadd(Constant(float_type, 0),
                                               self.temps[source],
                                               target)

    # Unary - operator (implemented as 0 - x)
    def emit_usub_int(self, source, target):
        self.temps[target] = self.builder.sub(Constant(int_type, 0),
                                              self.temps[source],
                                              target)

    def emit_usub_float(self, source, target):
        self.temps[target] = self.builder.fsub(Constant(float_type, 0),
                                               self.temps[source],
                                               target)

    # Binary < operator
    def emit_lt_int(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('<', self.temps[left], self.temps[right], target)

    def emit_lt_float(self, left, right, target):
        self.temps[target] = self.builder.fcmp_ordered('<', self.temps[left], self.temps[right], target)

    # Binary <= operator
    def emit_le_int(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('<=', self.temps[left], self.temps[right], target)

    def emit_le_float(self, left, right, target):
        self.temps[target] = self.builder.fcmp_ordered('<=', self.temps[left], self.temps[right], target)

    # Binary > operator
    def emit_gt_int(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('>', self.temps[left], self.temps[right], target)

    def emit_gt_float(self, left, right, target):
        self.temps[target] = self.builder.fcmp_ordered('>', self.temps[left], self.temps[right], target)

    # Binary >= operator
    def emit_ge_int(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('>=', self.temps[left], self.temps[right], target)

    def emit_ge_float(self, left, right, target):
        self.temps[target] = self.builder.fcmp_ordered('>=', self.temps[left], self.temps[right], target)

    # Binary == operator
    def emit_eq_int(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('==', self.temps[left], self.temps[right], target)

    def emit_eq_bool(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('==', self.temps[left], self.temps[right], target)

    def emit_eq_float(self, left, right, target):
        self.temps[target] = self.builder.fcmp_ordered('==', self.temps[left], self.temps[right], target)

    # Binary != operator
    def emit_ne_int(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('!=', self.temps[left], self.temps[right], target)

    def emit_ne_bool(self, left, right, target):
        self.temps[target] = self.builder.icmp_signed('!=', self.temps[left], self.temps[right], target)

    def emit_ne_float(self, left, right, target):
        self.temps[target] = self.builder.fcmp_ordered('!=', self.temps[left], self.temps[right], target)

    # Binary && operator
    def emit_and_bool(self, left, right, target):
        self.temps[target] = self.builder.and_(self.temps[left], self.temps[right], target)

    # Binary || operator
    def emit_or_bool(self, left, right, target):
        self.temps[target] = self.builder.or_(self.temps[left], self.temps[right], target)

    # Unary ! operator (implemented as x == 0)
    def emit_not_bool(self, source, target):
        self.temps[target] = self.builder.icmp_signed('==', self.temps[source], Constant(bool_type, 0), target)

    # Print statements (calls into the C runtime declared above)
    def emit_print_int(self, source):
        self.builder.call(self.runtime['_print_int'], [self.temps[source]])

    def emit_print_float(self, source):
        try:
            self.builder.call(self.runtime['_print_float'], [self.temps[source]])
        except KeyError as e:
            print("Failed to print a float: {}".format(e))

    def emit_print_bool(self, source):
        # _print_bool takes a 32-bit int, so zero-extend the 1-bit bool first.
        tmp = self.builder.zext(self.temps[source], int_type)
        self.builder.call(self.runtime['_print_bool'], [tmp])

    # blocks
    def emit_block(self, label):
        self.builder.position_at_end(self.blocks[label])

    def emit_branch(self, label):
        self.builder.branch(self.blocks[label])

    def emit_cbranch(self, testvar, iflabel, elselabel):
        self.builder.cbranch(self.temps[testvar],
                             self.blocks[iflabel],
                             self.blocks[elselabel])

    # Extern function declaration.
    def emit_extern_func(self, name, return_type, parameter_names):
        #print("emit_extern_func: ", name, return_type, parameter_names)
        rettype = typemap[return_type]
        parmtypes = [typemap[pname] for pname in parameter_names]
        func_type = FunctionType(rettype, parmtypes)
        self.vars[name] = Function(self.module, func_type, name=name)

    # Call an external function.
    def emit_call_func(self, name, args, target):
        #print("NOT IMPLEMENTED: emit_call_func: ", name, *args)
        func = self.vars[name]
        argvals = [self.temps[name] for name in args]
        #print('{}'.format(self.temps))
        self.temps[target] = self.builder.call(func, argvals)
#######################################################################
# TESTING/MAIN PROGRAM
#######################################################################
def compile_llvm(source):
    """Translate Gone source text into LLVM IR and return it as a string.

    Pipeline: front-end IR generation (ircode.compile_ircode) followed by the
    LLVM back-end (GenerateLLVM). Both stages are single-function for now and
    will be reworked for user-defined functions in Projects 7/8.
    """
    from .ircode import compile_ircode

    ir_instructions = compile_ircode(source)   # front end -> SSA tuples
    backend = GenerateLLVM()                   # fresh module/function/builder
    backend.generate_code(ir_instructions)     # emit LLVM into the module
    return str(backend.module)
def main():
    """Command-line entry point: compile the given Gone file and print its LLVM IR.

    Usage: python3 -m gone.llvmgen filename
    Exits with status 1 if the filename argument is missing or extra
    arguments are supplied.
    """
    import sys
    if len(sys.argv) != 2:
        sys.stderr.write("Usage: python3 -m gone.llvmgen filename\n")
        raise SystemExit(1)
    # Use a context manager so the source file is always closed promptly
    # (the original leaked the file object returned by a bare open()).
    with open(sys.argv[1]) as srcfile:
        source = srcfile.read()
    llvm_code = compile_llvm(source)
    print(llvm_code)
# Run only when executed as a script (e.g. `python -m gone.llvmgen`), not on import.
if __name__ == '__main__':
    main()
| true |
3138f8b48c8e79f871bb7b1d29d28ab77b3e1a16 | Python | junteudjio/pytest-tutorial | /test_pytests/example7/test_example.py | UTF-8 | 515 | 2.59375 | 3 | [] | no_license | from example import get_and_upper_and_persist
import pytest
def test_get_and_upper_and_persist(monkeypatch):
    """Happy path: stdin yields a string, persistence is stubbed out,
    and the function returns the upper-cased input."""
    monkeypatch.setattr('builtins.input', lambda:'string-data')
    monkeypatch.setattr('example.db_persist', lambda x:None)  # avoid touching a real store
    data = get_and_upper_and_persist()
    assert data == 'STRING-DATA'
def test_get_non_string_and_error(monkeypatch):
    """Non-string input: with input() patched to return an int,
    get_and_upper_and_persist is expected to raise ValueError."""
    monkeypatch.setattr('builtins.input', lambda: 1)
    with pytest.raises(ValueError):
        data = get_and_upper_and_persist()
8ba42a83d212914b429bfabf9980b3423fbf5707 | Python | Gam1999/Practice-Python | /EncryptAndDecrypt.py | UTF-8 | 774 | 3.84375 | 4 | [] | no_license | import string
# ROT13 translation tables. ROT13 is an involution (applying it twice is a
# no-op), so the "encrypt" and "decrypt" tables describe the same mapping.
ROT13Encrypt = str.maketrans(
    "ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz",
    "NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm")
ROT13Decrypt = str.maketrans(
    "NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm",
    "ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz")
# Show the menu.
print("Select 2 options")
print(" - 1 encrypt with ROT 13")
print(" - 2 decrypt with ROT 13\n")
# Read the user's choice and the text to transform.
# NOTE(review): int() raises ValueError on non-numeric input -- unhandled here.
InputOption = int(input("Choose option: "))
InputText = input("Enter text: ")
TextEncrypt = ""
TextDecrypt = ""
# Any option other than 1 falls through to decryption.
if InputOption == 1:
    TextEncrypt = str(InputText).translate(ROT13Encrypt)
    print('Ciphertext is "'+TextEncrypt+'"')
else:
    TextDecrypt = str(InputText).translate(ROT13Decrypt)
    print('Plaintext is "'+TextDecrypt+'"')
493b592f8bb76c6ab3e8307d906f4a3129a6f0f5 | Python | bodii/test-code | /python/python_test_002/04/13.py | UTF-8 | 259 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
import shutil
# 删除目录
# os.rmdir 只能删除空目录;目录非空时会抛出 OSError
# shutil.rmtree(path) 会在目录包含其他文件和子目录的情况下将该目录整体删除
| true |
e606db5cfde22cc5f072d7001bf840d2239fd00f | Python | Zorro-Lin-7/Upwork | /direct_messages/services.py | UTF-8 | 2,940 | 2.703125 | 3 | [] | no_license | from django.core.exceptions import ValidationError
from django.utils import timezone
from django.db.models import Q
from .models import Message, ChatRoom
from .signals import message_read, message_sent # 导入Signal
class MessagingService(object):
    """Direct-message operations: sending, reading, and listing conversations."""

    def send_message(self, sender, recipient, message):
        """Create and persist a Message from sender to recipient.

        Raises ValidationError when sender == recipient.
        Returns (message, 200) -- the saved Message object plus an HTTP-style
        status code.
        """
        if sender == recipient:
            raise ValidationError("You can't send message to yourself.")
        # Create the Message object.
        message = Message(sender=sender, recipient=recipient, content=str(message))
        message.save()
        # Announce it through the message_sent signal.
        message_sent.send(sender=message, from_user=message.sender, to=message.recipient)
        return message, 200  # finally return the Message object and a 200 status code
    def get_unread_messages(self, user):
        # All messages addressed to `user` that have no read timestamp yet.
        return Message.objects.all().filter(recipient=user, read_at=None)
    def read_message_formatted(self, message_id):
        """Mark the message read and return it as '<sender>: <content>' (or '')."""
        try:
            message = Message.objects.get(id=message_id)
            self.mark_as_read(message)
            return message.sender.username + ": " + message.content  # fixed display format
        except Message.DoesNotExist:
            return ""
    def read_message(self, message_id):
        # Read a message and return its text content ('' if it does not exist).
        try:
            message = Message.objects.get(id=message_id)
            self.mark_as_read(message)  # helper that records the read timestamp
            return message.content
        except Message.DoesNotExist:
            return ""
    def mark_as_read(self, message):
        # Set the read timestamp, emit the message_read signal, then save.
        # Idempotent: already-read messages are left untouched.
        if message.read_at is None:
            message.read_at = timezone.now()
            message_read.send(sender=message, from_user=message.sender, to=message.recipient)
            message.save()
    def get_conversations(self, user):
        # Build the list of chat rooms `user` takes part in, each described by
        # its pk and the other participant.
        chatrooms = ChatRoom.objects.filter((Q(sender=user) | Q(recipient=user)))
        chatroom_mapper = []
        for chatroom in chatrooms:
            chatroom_dict = {}
            chatroom_dict['pk'] = chatroom.pk
            # The "recipient" shown is always the other side of the room.
            if user == chatroom.sender:
                recipient = chatroom.recipient
            else:
                recipient = chatroom.sender
            chatroom_dict['recipient'] = recipient
            chatroom_mapper.append(chatroom_dict)
        return chatroom_mapper
    def get_active_conversations(self, sender, recipient):
        # Messages exchanged between the two users, in either direction,
        # ordered chronologically. Q works like a SQL WHERE clause here.
        active_conversations = Message.objects.filter(
            (Q(sender=sender) & Q(recipient=recipient)) |
            (Q(sender=recipient) & Q(recipient=sender))
        ).order_by('sent_at')
        return active_conversations
return active_conversations
| true |
fbb9af4baf34162c7eb3f21c6db02d1cdf601066 | Python | dvdmrn/AV-study | /viapoint_editor/animationplayer/keylog_data.py | UTF-8 | 2,585 | 3.140625 | 3 | [] | no_license | import pygame
import time
import collections
import csv
pygame.init()
# Window geometry and palette.
display_width = 800
display_height = 600
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
car_width = 73  # sprite width in pixels, used for the edge-collision check
# Ordered {elapsed-seconds: 0/1 keypress flag} log, written out by exportVals().
keylogData = collections.OrderedDict()
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()
# Sprite image -- presumably expected next to the script; confirm the path.
carImg = pygame.image.load('schwa.png')
def car(x,y):
    """Draw the car sprite on the game display at pixel position (x, y)."""
    gameDisplay.blit(carImg,(x,y))
def text_objects(text, font):
    """Render `text` with `font` in black; return (surface, bounding rect)."""
    textSurface = font.render(text, True, black)
    return textSurface, textSurface.get_rect()
def message_display(text):
    """Flash `text` centred on screen, then restart the game loop.

    NOTE(review): this calls game_loop() recursively instead of returning,
    so each crash deepens the call stack.
    """
    largeText = pygame.font.Font('freesansbold.ttf',20)
    TextSurf, TextRect = text_objects(text, largeText)
    TextRect.center = ((display_width/2),(display_height/2))
    gameDisplay.blit(TextSurf, TextRect)
    pygame.display.update()
    # time.sleep(2)
    game_loop()
def crash():
    """Show the crash message (which in turn restarts the game loop)."""
    message_display('You Crashed')
def game_loop():
    """Main 60 FPS loop: log space-bar state per frame and drive the car sprite.

    Every frame records {elapsed seconds: pressVal} into the global keylogData.
    Space sets the flag, any tracked key-up clears it, Right prints the log,
    and 'e' exports it to CSV via exportVals().

    NOTE(review): `print keylogData` below is Python 2 statement syntax --
    this module appears to target Python 2.
    """
    x = (display_width * 0.45)
    y = (display_height * 0.8)
    x_change = 0  # horizontal velocity; never modified, so the car stays put
    gameExit = False
    initTime = time.time()
    pressVal = 0  # 1 while the space bar is held, else 0
    while not gameExit:
        # update time stamp
        currentTime = time.time()
        currentTime = currentTime - initTime
        keylogData[currentTime] = pressVal
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    pressVal = 1
                if event.key == pygame.K_RIGHT:
                    print keylogData
                if event.key == pygame.K_e:
                    exportVals(keylogData)
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key==pygame.K_SPACE:
                    pressVal = 0
        x += x_change
        gameDisplay.fill(white)
        car(x,y)
        pygame.display.set_caption(str(time.time()))
        # Edge collision: restart (via crash -> message_display -> game_loop).
        if x > display_width - car_width or x < 0:
            crash()
        pygame.display.update()
        clock.tick(60)
def exportVals(klData):
    """Write the keypress log to 'keylog_data.csv' as rows of (time, keypress).

    klData: a mapping of elapsed-time floats to 0/1 keypress flags; rows are
    written in the mapping's iteration order. No header row is emitted, to
    keep the on-disk format identical to earlier captures.
    """
    # newline='' is how the csv module requires text files to be opened on
    # Python 3; without it every row is followed by a blank line on Windows.
    # (NOTE(review): the rest of this script uses Python 2 print statements --
    # confirm the target interpreter, as Py2's open() has no newline arg.)
    with open('keylog_data.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        print("writing keylog_data.csv")
        for row in klData.items():
            writer.writerow(row)
        print("finished writing file")
# Start the game immediately when the script runs; tear pygame down on exit.
game_loop()
pygame.quit()
quit()