blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f4e30912f841b906361f0e772695f3f6d5b2393 | 393a2545700bd2d217dc2fd85a10d1490cfd36dd | /LeetCode/find_min_in_rotated_sorted_array_2.py | b1069020d04be9657e02caf8ad439134cea8b1d7 | [] | no_license | Jfeng3/careercup | 3b12d0c2f5b1b7ef317c32cf38760dad90508995 | 3087e67b8d44ebdca68b6face7c7b6b991f45d70 | refs/heads/master | 2020-05-15T12:03:43.867295 | 2015-01-27T20:55:58 | 2015-01-27T20:55:58 | 29,100,566 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | class Solution:
    # @param num, a list of integer
    # @return an integer
    def findMin(self, num):
        # Search the whole list via the recursive helper (inclusive bounds).
        return self.findMin_re(num,0,len(num)-1)
def findMin_re(self,num,start,end):
if start == end:
return num[start]
if start+1 == end:
if num[start]<=num[end]:
return num[start]
else:
return num[end]
mid = start + (end-start)/2
if num[end]>num[mid]:
return self.findMin_re(num,start,mid)
elif num[end]<num[mid]:
return self.findMin_re(num,mid+1,end)
elif num[start]>num[mid]:
return self.findMin_re(num,start+1,mid)
elif num[start]<num[mid]:
return self.findMin_re(num,start,mid-1)
else:
return min(self.findMin_re(num,start,mid),self.findMin_re(num,mid+1,end))
| [
"jfeng1115@gmail.com"
] | jfeng1115@gmail.com |
2a500a425eb1abbc023f928e0a265bbc37889d78 | 64fc5dfec9a6f7b31c224286321899f5103d3983 | /duckworthd/mining.py | f2127b917aeb1fa70482dce7c25ce5b13176311f | [] | no_license | espoirMur/public-DKHQ_GlobalWitness | 68aaaaef52a1b05773ded143060a0c5f45c14c6a | e0b0d2b669faa1cb6b3cc86791ff5ce306c1cfcb | refs/heads/master | 2020-04-17T17:22:23.323979 | 2018-10-11T19:50:42 | 2018-10-11T19:50:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,678 | py | """Utilities for working with satellite imagery for mining."""
import json
import os
import re
import string
import tempfile
import urllib
import zipfile
from matplotlib import pyplot as plt
import bcolz
import ee as earth_engine
earth_engine.Initialize()
import gdal
import h5py
import numpy as np
import pandas as pd
import geopandas as gpd
# Default directory containing images.
DEFAULT_IMAGE_ROOT = '/workspace/lab/data_s3/mines_ipis'
# Fusion Table ID containing polygons around mining sites.
DEFAULT_IPIS_MINING_POLYGONS = 'ft:1HG3R3cebqMp2yK0cOimTL7wLnh41c1DH24GyWQg1'
# Images with 4 axes. The first two are typical for images -- x, y. The
# third is color band (traditionally RGB, but Landsat captures more). The
# fourth is time, representing when the image was captured.
X_AXIS = 0
Y_AXIS = 1
BAND_AXIS = 2
TIME_AXIS = 3
def load_ipis_mining_sites_dataset():
    """Load all mining sites annotated by IPIS from FusionTable as GeoJSON.

    Returns the whole FeatureCollection as a plain GeoJSON dict (the
    getInfo() call pulls the data down from Earth Engine).
    """
    return earth_engine.FeatureCollection('ft:1P1f-A2Sl44YJEqtD1FvA1z7QtDFsRut1QziMD-nV').getInfo()
def _get_metadata_file_path(image_root):
"""Get absolute path to metadata.json file in a given directory.
If there are more than one metadata.json files, pick the last one after
sorting.
"""
if not os.path.exists(image_root):
raise ValueError(
u'%s does not exist. No metadata files found.' % image_root)
filenames = os.listdir(image_root)
metadata_filenames = [name for name in filenames if 'metadata' in name]
if not metadata_filenames:
raise ValueError(
u'No files with "metadata" in name found under %s' % image_root)
metadata_filename = list(sorted(metadata_filenames))[-1]
return os.path.join(image_root, metadata_filename)
def load_metadata(image_root=None):
    """Load the image metadata JSON file as a pandas DataFrame.

    Falls back to an empty DataFrame (with the expected columns) when no
    metadata file can be located under *image_root*.
    """
    root = image_root or DEFAULT_IMAGE_ROOT
    try:
        metadata_path = _get_metadata_file_path(root)
    except ValueError:
        # No metadata on disk yet: return an empty, correctly-shaped frame.
        return pd.DataFrame(
            columns=["bands", "collection", "dates", "dim", "fpath", "id"])
    with open(metadata_path) as json_file:
        return pd.DataFrame(json.load(json_file))
def save_metadata(image_root, metadata):
    """Serialize the metadata DataFrame to <image_root>/metadata4.json."""
    if not os.path.exists(image_root):
        os.makedirs(image_root)
    out_path = os.path.join(image_root, "metadata4.json")
    with open(out_path, "w") as out_file:
        return metadata.to_json(out_file)
def merge_metadata(old_metadata, new_metadata):
    """Merge two metadata DataFrames, preferring rows from *new_metadata*.

    Rows of *old_metadata* whose 'fpath' also appears in *new_metadata*
    are dropped, then the two frames are concatenated with a fresh index.
    """
    duplicated = old_metadata['fpath'].isin(new_metadata['fpath'])
    surviving_old = old_metadata[~duplicated]
    return pd.concat([surviving_old, new_metadata], ignore_index=True)
def load_image(img_metadata, image_root=None):
    """Read one image (stored as a bcolz array) fully into memory."""
    root = image_root or DEFAULT_IMAGE_ROOT
    full_path = os.path.join(root, img_metadata['fpath'])
    return bcolz.open(full_path)[:]
def geodataframe_to_earthengine(geodataframe):
    """Converts a GeoDataFrame to an ee.FeatureCollection."""
    # Round-trip through GeoJSON, then hand off to the generic converter.
    return geojson_to_earthengine(json.loads(geodataframe.to_json()))
def geojson_to_earthengine(geojson):
    """Converts a GeoJSON dict to an Earth Engine type.

    Args:
        geojson: GeoJSON-supported object as a nested dict/list/tuple.

    Returns:
        A matching type that Earth Engine understands
        (e.g. ee.FeatureCollection, ee.Geometry.Point).

    Raises:
        ValueError: for GeoJSON types this converter does not handle.
    """
    if isinstance(geojson, dict):
        if 'type' not in geojson:
            raise ValueError("Not 'type' attribute in geojson: %s" % (geojson,))
        geo_type = geojson['type']
        if geo_type == 'FeatureCollection':
            return earth_engine.FeatureCollection(
                geojson_to_earthengine(geojson['features']))
        if geo_type == 'Feature':
            return earth_engine.Feature(
                geojson_to_earthengine(geojson['geometry']),
                geojson['properties'])
        if geo_type == 'Point':
            return earth_engine.Geometry.Point(coords=geojson['coordinates'])
        if geo_type == 'Polygon':
            return earth_engine.Geometry.Polygon(
                coords=geojson['coordinates'],
                geodesic=geojson.get('geodesic', None))
        raise ValueError("Unsupported GeoJSON dict type: %s" % geo_type)
    if isinstance(geojson, list):
        return [geojson_to_earthengine(element) for element in geojson]
    if isinstance(geojson, tuple):
        return tuple(geojson_to_earthengine(element) for element in geojson)
    # Scalars pass through untouched (this file targets Python 2, hence
    # the 'unicode' check).
    if type(geojson) in [int, float, str, unicode]:
        return geojson
    raise ValueError("Unable to parse type: %s" % type(geojson))
def to_earthengine_featurecollection(obj):
    """Converts an object to an ee.FeatureCollection.

    'obj' can be one of:
      - str: a Fusion Table ID ("ft:xxx")
      - GeoDataFrame
      - GeoJSON dict of type 'FeatureCollection'

    Any other input falls through and yields None (as in the original).
    """
    # Fusion Table ID: let Earth Engine resolve it directly.
    if isinstance(obj, basestring):
        return earth_engine.FeatureCollection(obj)
    # GeoDataFrame: convert via its GeoJSON representation.
    if isinstance(obj, gpd.GeoDataFrame):
        return geodataframe_to_earthengine(obj)
    # GeoJSON dict: must already be a FeatureCollection.
    if isinstance(obj, dict):
        assert 'type' in obj
        assert obj['type'] == 'FeatureCollection'
        return geojson_to_earthengine(obj)
def load_image_mask(img_metadata, ipis_mining_sites=None, ipis_mining_polygons=None, image_root=None):
    """Load binary mask labeling pixels as "mining" or "not mining".

    Args:
        img_metadata: pd.Series from a metadata.json file.
        ipis_mining_sites: FeatureCollection GeoJSON dict containing all IPIS
            mining site locations as Points. Loaded on demand when None.
        ipis_mining_polygons: Object that can be converted to an
            ee.FeatureCollection. See to_earthengine_featurecollection() for
            available options. Defaults to Sina's Fusion Table.
        image_root: string. Unused.

    Returns:
        numpy array of shape [100, 100] with values {0, 1}, where 0.0 == no
        mine and 1.0 == mine, centered at the location described by
        img_metadata.
    """
    # BUG FIX: the original tested 'ipis_mining_sites is None' here and then
    # converted ipis_mining_sites (not the polygons), so a caller-supplied
    # ipis_mining_polygons was ignored and the default path passed None
    # through and crashed on reduceToImage.
    if ipis_mining_polygons is None:
        ipis_mining_polygons = DEFAULT_IPIS_MINING_POLYGONS
    ipis_mining_polygons = to_earthengine_featurecollection(ipis_mining_polygons)
    # Rasterize the polygons: each pixel takes the 'mine' property of the
    # first polygon covering it.
    ipis_mining_image = ipis_mining_polygons.reduceToImage(
        properties=['mine'],
        reducer=earth_engine.Reducer.first())  # earth_engine.Image() type

    # Get the Point corresponding to this image from the IPIS dataset.
    roi_id = img_metadata['id']
    if ipis_mining_sites is None:
        ipis_mining_sites = load_ipis_mining_sites_dataset()
    roi = ipis_mining_sites['features'][roi_id]['geometry']
    assert roi['type'] == 'Point'

    # Create a circle around the point: radius of 1500 meters.
    buff = 1500
    roi_point = earth_engine.Geometry.Point(roi['coordinates'])
    # NOTE(review): the unbound Geometry.buffer(point, buff) call mirrors the
    # original; roi_point.buffer(buff) is the documented instance form.
    roi_buff = earth_engine.Geometry.buffer(roi_point, buff)  # ee.Geometry()
    roi_buff = roi_buff.getInfo()  # GeoJSON dict

    # Download the rasterized circle: 30 m/pixel -> ~100 pixel diameter.
    scale = 30
    mask = load_map_tile_containing_roi(
        ipis_mining_image, roi_buff['coordinates'], scale=scale)

    # Some tiles come back 101x101; crop so every mask is exactly 100x100.
    mask = mask[:100, :100]
    assert mask.shape[2] == 1, 'Mask has > 1 band.'
    return mask.reshape(mask.shape[0], mask.shape[1])
def load_map_tile_containing_roi(image, roi, scale=30):
    """Get rasterized image containing ROI from Earth Engine.

    Constructs a rasterized image tile subsetting 'image'. The tile is large
    enough to fully contain the polygon described by 'roi' and holds one
    pixel per 'scale' m^2 area.

    Args:
        image: ee.Image instance. To be used as mask. Must have exactly 1 band.
        roi: Triple-nested list of floats, where the lowest level is
            [longitude, latitude] pairs from 'coordinates' of a GeoJSON
            polygon.
        scale: int. Number of squared meters per pixel.

    Returns:
        numpy array of shape [N x M x K]: width x height x number of bands.
    """
    # Random 10-letter base name for the downloaded tile.
    tile_name = ''.join(np.random.choice(list(string.ascii_letters), size=10))

    # Ask Earth Engine for a single-file GeoTIFF download of the region.
    download_request = {
        'image': image.serialize(),
        'scale': '%d' % scale,
        'filePerBand': 'false',
        'name': tile_name,
        'region': roi,
    }
    url = earth_engine.data.makeDownloadUrl(
        earth_engine.data.getDownloadId(download_request))
    local_zip, headers = urllib.urlretrieve(url)
    with zipfile.ZipFile(local_zip) as archive:
        local_tif_filename = archive.extract(
            tile_name + '.tif', tempfile.mkdtemp())

    # Read the GeoTIFF into memory as [x, y, color band].
    dataset = gdal.Open(local_tif_filename, gdal.GA_ReadOnly)
    bands = [dataset.GetRasterBand(band_index + 1).ReadAsArray()
             for band_index in range(dataset.RasterCount)]
    return np.stack(bands, axis=2)
def save_images(image_root, images, metadata):
    """Store a list of images to disk, one bcolz array per metadata row."""
    assert len(images) == len(metadata)
    if not os.path.exists(image_root):
        os.makedirs(image_root)
    # metadata.iterrows() yields (index, row); only the row is needed.
    for img, (_, img_metadata) in zip(images, metadata.iterrows()):
        save_image(image_root, img, img_metadata)
def save_image(image_root, img, img_metadata):
    """Persist a single image as a bcolz array at metadata['fpath']."""
    if not os.path.exists(image_root):
        os.makedirs(image_root)
    target = os.path.join(image_root, img_metadata['fpath'])
    target_dir = os.path.dirname(target)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # 'w' truncates any existing array; flush forces the write to disk.
    bcolz.carray(img, rootdir=target, mode='w').flush()
def save_images_with_hdf5(image_root, images, metadata):
    """Write *images* into a single resizable HDF5 dataset on disk."""
    assert len(images) > 0, "Must have 1+ images to write."
    if not os.path.exists(image_root):
        os.makedirs(image_root)
    # Dataset starts sized for all images but is resizable along axis 0.
    per_image_shape = images[0].shape
    with h5py.File(os.path.join(image_root, "images.h5"), "w") as h5f:
        dataset = h5f.create_dataset(
            "images",
            (len(images),) + per_image_shape,
            maxshape=(None,) + per_image_shape)
        for index, image in enumerate(images):
            dataset[index] = image
def save_images_with_bcolz(image_root, imgs, metadata):
    """Write *imgs* into a single bcolz carray under <image_root>/images."""
    assert len(imgs) == len(metadata)
    if not os.path.exists(image_root):
        os.makedirs(image_root)
    assert len(imgs) > 0, "Must have 1+ images to write."
    # Each image becomes one row of shape (1, *image_shape).
    row_shape = (1,) + imgs[0].shape
    rootdir = os.path.join(image_root, "images")
    with bcolz.carray(imgs[0].reshape(row_shape),
                      rootdir=rootdir, mode="w") as array:
        # The carray was seeded with the first image; append the rest.
        for img in imgs[1:]:
            array.append(img.reshape(row_shape))
def load_images_with_hdf5(image_root):
    """Load all images from the HDF5 dataset into one numpy array."""
    h5_path = os.path.join(image_root, "images.h5")
    with h5py.File(h5_path) as h5f:
        return h5f['images'][:]
def load_image_with_hdf5(image_root, img_metadata):
    """Load the single image whose row index is the metadata row's name."""
    h5_path = os.path.join(image_root, "images.h5")
    with h5py.File(h5_path) as h5f:
        return h5f['images'][int(img_metadata.name)]
def load_images_with_bcolz(image_root):
    """Load all images from the bcolz array into one numpy array."""
    array_dir = os.path.join(image_root, "images")
    with bcolz.open(array_dir) as array:
        return array[:]
def load_image_with_bcolz(image_root, img_metadata):
    """Load the single image whose row index is the metadata row's name."""
    array_dir = os.path.join(image_root, "images")
    with bcolz.open(array_dir) as array:
        return array[int(img_metadata.name)]
def plot_image(image, metadata=None, band=None, ax=None, cmap='gray'):
    """Render *image* on *ax* (or the current axes), grayscale by default.

    4-D images are reduced over time with a NaN-aware median; 3-D images
    additionally require *band* plus *metadata* to pick one color band.
    Tick marks are removed. Returns the axes drawn on.
    """
    axes = ax or plt.gca()
    if len(image.shape) == 4:
        # Collapse the time axis, ignoring NaNs (e.g. cloud-masked pixels).
        image = np.nanmedian(image, axis=TIME_AXIS)
    if len(image.shape) == 3:
        assert band is not None, "You must choose a band to plot."
        assert metadata is not None, "metadata required to select color band."
        image = image[:, :, metadata['bands'].index(band)]
    axes.imshow(image, cmap=cmap)
    axes.xaxis.set_ticks([])
    axes.yaxis.set_ticks([])
    return axes
def canonicalize_image(img, img_metadata):
    """Canonicalize image for machine learning models.

    - Aggregates (NaN-aware median) across 2016/06 to 2017/06.
    - Drops all bands but the raw bands B1...B11.

    Args:
        img: numpy array [height, width, band, time].
        img_metadata: pandas Series with 'bands', 'dates' and 'dim' entries;
            'dates' parallels the time axis of img.

    Returns:
        (img, img_metadata) with one aggregated time slice and raw bands.

    Raises:
        ValueError: if fewer than 12 dates fall inside the window.
    """
    img_metadata = img_metadata.copy()

    # Indices of the acquisition dates inside the 12-month window.
    time_indices = [i for i, date in enumerate(img_metadata['dates'])
                    if date >= '20160601' and date < '20170601']
    dates = [img_metadata['dates'][i] for i in time_indices]
    if len(dates) < 12:
        raise ValueError(
            "Found %d dates for the following image when 12 were expected. %s"
            % (len(dates), img_metadata))

    # Aggregate across the 12-month span (hides cloud cover). BUG FIX: the
    # original took the median over *all* time slices even though it had
    # just filtered the dates; restrict the median to the selected window.
    # Keep only the start date, as exactly one time slice remains.
    img = np.nanmedian(img[:, :, :, time_indices], axis=TIME_AXIS,
                       keepdims=True)
    img_metadata['dates'] = [dates[0]]

    # Only keep raw bands (B1, B2, ...); other bands derive from them.
    # Raw string fixes the invalid '\d' escape in the original pattern.
    bands = [band for band in img_metadata['bands']
             if re.search(r'^B\d+$', band) is not None]
    band_indices = [img_metadata['bands'].index(band) for band in bands]
    img = img[:, :, band_indices]
    img_metadata['bands'] = bands
    img_metadata["dim"] = img.shape
    return img, img_metadata
def canonicalize_image_by_month(img, img_metadata, band=None):
    """Canonicalize an image by taking its median pixel value per month.

    Args:
        img: numpy array, shape [height, width, num color bands, num dates].
        img_metadata: pandas Series. Contains 'bands' and 'dates' entries.
        band: None, string, or list of strings. If None, output all color
            bands. If string, output a single color band; if list of
            strings, output one color band per string.

    Returns:
        (result_img, result_metadata): result_img has shape
        [height, width, len(bands), 12], one slot per calendar month
        (NaN where a month has no data).
    """
    assert len(img.shape) == 4, "img must be [width, height color band, time]."

    # Select bands to process. BUG FIX: the original used 'if' instead of
    # 'elif' for the isinstance checks, so band=None always fell through to
    # the ValueError branch after assigning 'bands'.
    if band is None:
        bands = img_metadata["bands"]
    elif isinstance(band, basestring):
        bands = [band]
    elif isinstance(band, list):
        bands = band
    else:
        raise ValueError("Unrecognized type for argument 'band': %s" % band)
    band_idxs = [img_metadata["bands"].index(b) for b in bands]
    img_band = img[:, :, band_idxs, :]

    # Extract the month ("MM") out of each date (YYYYMMDD string).
    dates = pd.DataFrame({"dates": img_metadata['dates']})
    dates["month"] = dates["dates"].str.slice(4, 6)

    # Build the result: 12 monthly slots, NaN-filled for missing months.
    width, height, _, _ = img.shape
    result_img = np.full((width, height, len(bands), 12), np.nan)
    for month, group in dates.groupby("month"):
        # Select the time slices belonging to this month.
        time_idxs = list(group.index)
        img_month = img_band[:, :, :, time_idxs]
        # NaN-aware median over this month's acquisitions.
        result_img[:, :, :, int(month) - 1] = np.nanmedian(
            img_month, axis=TIME_AXIS)

    # New metadata: the first date of each month present in the grouping.
    result_metadata = img_metadata.copy()
    result_metadata["dim"] = result_img.shape
    result_metadata["bands"] = bands
    result_metadata["dates"] = list(dates.groupby("month").first()["dates"])
    return result_img, result_metadata
def merge_canonical_image_and_mask(canonical_img, mask, img_metadata):
    """Append *mask* to *canonical_img* as an extra 'mask' color band."""
    assert len(canonical_img.shape) == 4
    # Reshape mask to [h, w, 1 band, 1 time], then repeat it along the time
    # axis so it matches the image's time dimension.
    mask = mask.reshape(mask.shape[0], mask.shape[1], 1, 1)
    mask = np.tile(mask, (1, 1, 1, canonical_img.shape[3]))
    merged_img = np.concatenate([canonical_img, mask], axis=BAND_AXIS)
    # Record the new band in a copy of the metadata.
    merged_metadata = img_metadata.copy()
    merged_metadata['bands'] = merged_metadata['bands'] + ['mask']
    return merged_img, merged_metadata
def plot_monthly_image(img, img_metadata):
    """Plot a grid of images: one row per month, one column per band."""
    assert len(
        img.shape) == 4, "img shape must be [height, width, color band, month]"
    assert img.shape[3] == 12, "img must have 1 entry per month for every color band."
    months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
              "July", "Aug", "Sept", "Oct", "Nov", "Dec"]
    num_cols = len(img_metadata["bands"])
    num_rows = len(months)
    plt.figure(figsize=(2 * num_cols, 2 * num_rows))
    for row in range(num_rows):
        for col in range(num_cols):
            # Subplot indices are 1-based and row-major.
            ax = plt.subplot(num_rows, num_cols, row * num_cols + col + 1)
            ax.set_title("%s/%s" % (months[row], img_metadata["bands"][col]))
            plot_image(img[:, :, col, row])
| [
"duckworthd@gmail.com"
] | duckworthd@gmail.com |
ce1bfba60b3901fe4e19a9f0980780963272f3b5 | 893c0b6d790ac90467528849e303978d21b58590 | /box/migrations/0002_remove_feedback_level.py | 96f19f5a1b8a386da20eb20498247de771dbb119 | [] | no_license | thevyom1/Human-Resource-Management-software | 553e0a89b507407bdd2636d939b2857e41d17c35 | 4b634fa6b59f2f1de186d7b1d90de03feed452ea | refs/heads/master | 2021-01-17T18:06:41.004474 | 2016-07-04T07:19:43 | 2016-07-04T07:19:43 | 62,538,692 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-28 17:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the 'level' field from box.Feedback."""

    dependencies = [
        ('box', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='feedback',
            name='level',
        ),
    ]
| [
"aggarwalvyom@gmail.com"
] | aggarwalvyom@gmail.com |
25d2fda834a74a7a0bfd09c7a9cb27683426f5a3 | 898fd947dad817d2d70d2390108b9a8d9fcac818 | /api/bls_data.py | ded1aee2b5a9e925f98b4ac6073bce7f32278775 | [] | no_license | N-ickMorris/Stock-Market-Portfolio | 8860085a32e14b43107118309b7c061c449508c5 | 20f804845eb7608b40ae26326b6edc1fcfc4811f | refs/heads/master | 2020-04-06T10:38:20.532899 | 2019-08-14T02:38:47 | 2019-08-14T02:38:47 | 157,386,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | # This file is a simple wrapper to execute code provided entirely by Brian Dew
# the original code is here: https://github.com/bdecon/econ_data/blob/master/APIs/BLS.ipynb
# data management modules
import pandas as pd
import requests
import json
# a function for collecting unemployement data from the Bureau of Labor Statistics
def run(bls_key, start_year = 2000, end_year = 2019):
# set the base of the api url
api_url = "https://api.bls.gov/publicAPI/v2/timeseries/data/"
# set the key
key = "?registrationkey={}".format(bls_key)
# Series stored as a dictionary
series_dict = {
"LNS14000003": "White",
"LNS14000006": "Black",
"LNS14000009": "Hispanic"}
# Start year and end year
date_r = (start_year, end_year)
# Handle dates
dates = [(str(date_r[0]), str(date_r[1]))]
while int(dates[-1][1]) - int(dates[-1][0]) > 10:
dates = [(str(date_r[0]), str(date_r[0]+9))]
d1 = int(dates[-1][0])
while int(dates[-1][1]) < date_r[1]:
d1 = d1 + 10
d2 = min([date_r[1], d1+9])
dates.append((str(d1),(d2)))
# set up an object to store results
df = pd.DataFrame()
# collect data
for start, end in dates:
# Submit the list of series as data
data = json.dumps({
"seriesid": list(series_dict.keys()),
"startyear": start, "endyear": end})
# Post request for the data
p = requests.post(
"{}{}".format(api_url, key),
headers={"Content-type": "application/json"},
data=data).json()
for s in p["Results"]["series"]:
col = series_dict[s["seriesID"]]
for r in s["data"]:
date = pd.to_datetime("{} {}".format(
r["periodName"], r["year"]))
df.at[date, col] = float(r["value"])
# sort the data by time
df = df.sort_index()
df = df.reset_index()
# update column names
df.columns = ["datetime", "White_Unemployement", "Black_Unemployement", "Hispanic_Unemployement"]
# export results
return df | [
"noreply@github.com"
] | noreply@github.com |
d9ace0f42c9f2412ddfb11101c3b003020c12154 | 60d6b8501d0be546437b26a6ee1f9fab97ec3897 | /platypush/plugins/bluetooth/_model/_classes/_device/__init__.py | 3e568b84dc311d22699142a42b7c2ca993e4c511 | [
"MIT"
] | permissive | BlackLight/platypush | 68284a85b2f9eef303d26b04530f075927b5834a | 446bc2f67493d3554c5422242ff91d5b5c76d78a | refs/heads/master | 2023-08-31T21:01:53.519960 | 2023-08-29T22:05:38 | 2023-08-29T22:05:38 | 109,421,017 | 265 | 25 | MIT | 2023-09-01T23:15:49 | 2017-11-03T16:56:24 | Python | UTF-8 | Python | false | false | 138 | py | from ._major import MajorDeviceClass
from ._minor import MinorDeviceClass
# Public names re-exported by this package.
__all__ = [
    "MajorDeviceClass",
    "MinorDeviceClass",
]
| [
"fabio@manganiello.tech"
] | fabio@manganiello.tech |
9c151b3a6ea5ad2faf547932fcbb58f8c96ed5ea | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Control/GaudiSequencer/share/test_athretrysequencer.py | 019c631baffe73d52c8ae1a6ebcba84383c65e5e | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,634 | py | ###############################################################
#
# Job options file
#
#==============================================================
import AthenaCommon.Constants as Lvl
from AthenaCommon.AppMgr import theApp
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
# get a handle on the job main sequence
from AthenaCommon.AlgSequence import AlgSequence, AthSequencer
job = AlgSequence()
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Default the event count; EVTMAX may be pre-set by the driver
# (e.g. athena -c 'EVTMAX=...').
if not 'EVTMAX' in dir():
    EVTMAX = 10
    pass
theApp.EvtMax = EVTMAX
#--------------------------------------------------------------
# Sequence(s)
#--------------------------------------------------------------
## Sequencer configuration ##
# Retry the 'seq' sub-sequence up to 5 times per event.
job += CfgMgr.AthRetrySequencer ('seq', MaxRetries = 5)
import AthenaPython.PyAthena as PyAthena
class PyPush(PyAthena.Alg):
    """Algorithm that accepts every event and counts how many it has seen."""

    def __init__(self, name='PyPush', **kw):
        ## init base class
        kw['name'] = name
        super(PyPush, self).__init__(**kw)

    def initialize(self):
        # Per-job event counter.
        self.evts = 0
        return PyAthena.StatusCode.Success

    def execute(self):
        # Always pass the filter: this algorithm never rejects an event.
        self.setFilterPassed(True)
        self.evts += 1
        self.msg.info("events seen: %s" % self.evts)
        return PyAthena.StatusCode.Success

    def finalize(self):
        self.msg.info("finalize ==> total events: %s" % self.evts)
        return PyAthena.StatusCode.Success
    pass
class PyPull(PyAthena.Alg):
    """Algorithm that randomly rejects ~80% of events, driving the
    enclosing AthRetrySequencer to retry until one is accepted."""

    def __init__(self, name='PyPull', **kw):
        ## init base class
        kw['name'] = name
        super(PyPull, self).__init__(**kw)

    def initialize(self):
        # Per-job event counter.
        self.evts = 0
        return PyAthena.StatusCode.Success

    def execute(self):
        self.evts += 1
        self.setFilterPassed(True)
        import random
        # With probability 0.8 flip the filter to "failed" so the
        # sequencer retries with a fresh event.
        if random.random() < 0.8:
            self.msg.info("requesting more events!!")
            self.setFilterPassed(False)
        else:
            self.msg.info("event quite satisfying...")
        self.msg.info("seen %s event(s)" % self.evts)
        return PyAthena.StatusCode.Success

    def finalize(self):
        self.msg.info("finalize ==> total events: %s" % self.evts)
        return PyAthena.StatusCode.Success
    pass
# Wire the push/pull algorithms into the retry sequence, then count how
# many events ultimately pass the sequence.
job.seq += PyPush("push", OutputLevel = Lvl.INFO)
job.seq += PyPull("pull", OutputLevel = Lvl.INFO)
job += CfgMgr.AthEventCounter("counter")

#svcMgr.MessageSvc.OutputLevel = Lvl.INFO
#==============================================================
#
# End of job options file
#
###############################################################
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
0f79b5f6017b14b689d663720b17fdf8884f29e5 | a43d1f047c40f29b9fd52ec7ee9f81bd6a6846e3 | /10 Fancy Quotes.py | 5d5295a9bcfbdb011c3120c1057fbd1d31ae1a84 | [] | no_license | RakhshandaMujib/CodeChef-Problems | 00f6d19e7622fe655d783087cb21bbd327763e7b | abe8fac0fc76eeea8a95bc63eb34f6b6ce306704 | refs/heads/master | 2021-06-23T03:49:36.085943 | 2021-06-14T10:42:06 | 2021-06-14T10:42:06 | 201,504,672 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | T = int(input())
for _ in range(T):
s = input()
if s == s.lower() or s == ' ':
if 'not' in list(s.split()):
print('Real Fancy')
else:
print('regularly fancy') | [
"noreply@github.com"
] | noreply@github.com |
5606b5c260655962cf20f5832309e6fa1fba193e | 077beb02d73045eb97261a1c5e7021bfe709e55c | /tests/new_item/test_invalid_new_item_class.py | 0412c3d225cd16df9a5cc57f7d49be2a1a918cd8 | [
"MIT"
] | permissive | zcutlip/pyonepassword | a91d8491d807c2cede2c483a66872b7913ad3aac | 3ced5acf3667f1af73cad26ae0ef31e8c4b19585 | refs/heads/main | 2023-09-04T03:16:49.170698 | 2023-06-26T19:51:32 | 2023-06-26T19:51:32 | 201,505,055 | 48 | 13 | MIT | 2023-09-05T01:44:18 | 2019-08-09T16:29:56 | Python | UTF-8 | Python | false | false | 619 | py | import pytest
from pyonepassword.api.exceptions import OPInvalidItemException
from pyonepassword.op_items._new_item import OPNewItemMixin
class OPInvalidLoginItemTemplate(OPNewItemMixin):
    """New-item class that subclasses OPNewItemMixin without the attributes
    a concrete item template provides, used to trigger validation errors."""

    def __init__(self, title: str, fields=[], sections=[], extra_data={}):
        # NOTE(review): mutable defaults are shared across calls; harmless
        # here since they are only forwarded, but worth confirming.
        super().__init__(title, fields, sections, extra_data)
def test_invalid_new_item_class_01():
    # Instantiating the mixin directly (outside a concrete item class)
    # must raise OPInvalidItemException.
    with pytest.raises(OPInvalidItemException):
        OPNewItemMixin("invalid-new-item")
def test_invalid_login_item_template_01():
    # A subclass lacking the required template attributes must also raise.
    with pytest.raises(OPInvalidItemException):
        OPInvalidLoginItemTemplate("invalid login item template")
| [
"uid000@gmail.com"
] | uid000@gmail.com |
aeec4d590db8b63bc37b31a37eb3508e70cfbbd7 | 6de609b27ba697e01d96475ceebd781f7e007026 | /real_vDjBook/bin/pilfile.py | aa7877828caa081050982eaf5129dda1575303d9 | [] | no_license | raksuns/python_django | e284df049d86574bbf91c3894a61acc550101b9e | 22ce23263f950159c6cf0987c4321c40859f480d | refs/heads/master | 2021-01-12T11:27:40.528274 | 2016-11-28T10:57:25 | 2016-11-28T10:57:25 | 72,931,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | #!/home/shkim/pyDjango/vDjBook/bin/python
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
# No arguments: print usage and exit.
if len(sys.argv) == 1:
    print("PIL File 0.4/2003-09-30 -- identify image files")
    print("Usage: pilfile [option] files...")
    print("Options:")
    print("  -f  list supported file formats")
    print("  -i  show associated info and tile data")
    print("  -v  verify file headers")
    print("  -q  quiet, don't warn for unidentified/missing/broken files")
    sys.exit(1)

# Parse command-line flags (see usage text above).
try:
    opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
    print(v)
    sys.exit(1)

verbose = quiet = verify = 0
logging_level = "WARNING"

for o, a in opt:
    if o == "-f":
        # -f only lists the registered formats and exits.
        Image.init()
        id = sorted(Image.ID)
        print("Supported formats:")
        for i in id:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        # -D turns on debug-level logging.
        logging_level = "DEBUG"

logging.basicConfig(level=logging_level)
def globfix(files):
    """Expand wildcard patterns on Windows; pass the list through elsewhere.

    Windows shells hand glob patterns to programs unexpanded, so expand
    them here. On other platforms the shell already did the work.
    """
    if sys.platform != "win32":
        return files
    expanded = []
    for pattern in files:
        if glob.has_magic(pattern):
            expanded.extend(glob.glob(pattern))
        else:
            expanded.append(pattern)
    return expanded
# Identify each file: print format, size and mode; optionally verify.
for file in globfix(args):
    try:
        im = Image.open(file)
        print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
        if verbose:
            print(im.info, im.tile, end=' ')
        print()
        if verify:
            try:
                im.verify()
            # NOTE(review): deliberate best-effort catch-all so one broken
            # header does not abort the batch.
            except:
                if not quiet:
                    print("failed to verify image", end=' ')
                    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
    except IOError as v:
        # Unidentified or unreadable file.
        if not quiet:
            print(file, "failed:", v)
    except:
        # Anything else: report with a traceback and keep going.
        import traceback
        if not quiet:
            print(file, "failed:", "unexpected error")
            traceback.print_exc(file=sys.stdout)
860b6bc003f95d019e3d704ddab6810b18da8cad | e04acd829e46389392daccc11cbee68255f9af5e | /practice/ex40.py | 9e4e6e35e6be7b0ea83c5979fc2dff13fd69009c | [] | no_license | mahaocheng/pythonhardway | 9b210c1870e3a67e8ab246b6e6dcb744c66a0444 | f61735ba67b10e54cfc460bb7eb2a88e66001912 | refs/heads/master | 2021-01-11T17:59:05.230962 | 2017-04-11T10:21:27 | 2017-04-11T10:21:27 | 79,890,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | class Song(object):
def __init__(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for ma in self.lyrics:
print ma
happy_bday = Song(["Happy birthday to you",
"I don't want to get sued",
"So i'll stop right there"])
bulls_on_parade = Song(["They rally around the family",
"With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
| [
"mahaocheng1985@126.com"
] | mahaocheng1985@126.com |
440b99869395c2d799afc7da86ab92b42d80e041 | 42b2533118529ef6050676c7b2832390d0051dba | /kafka_example.py | cbdc29c9ce68776e5bf1de66127fed18b7f50ac2 | [] | no_license | kartikeya-calsoft/mqtt_liota_kafka | 1baf5e95a0684d8a64ab1a80eeb56d5d3d060afe | 041b1e589376af50fa05d3e9506afbd7a8b8f3f9 | refs/heads/master | 2021-05-07T22:51:33.994261 | 2017-10-18T06:28:01 | 2017-10-18T06:28:01 | 107,366,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,318 | py | import Queue
import logging
import pint
import string
from liota.dccs.dcc import RegistrationFailure
from liota.device_comms.mqtt_device_comms import MqttDeviceComms
from liota.entities.edge_systems.dk300_edge_system import Dk300EdgeSystem
from liota.entities.metrics.metric import Metric
from liota.lib.utilities.utility import read_user_config
# from liota.dccs.graphite import Graphite
from liota.dcc_comms.socket_comms import SocketDccComms
from kafka import KafkaConsumer
from kafka_comms import KafkaDccComms
from _kafka import Kafka
# Module-level logger.
log = logging.getLogger(__name__)
# getting values from conf file
config = read_user_config('samplePropMqtt.conf')
# Create unit registry (pint) for metric units
ureg = pint.UnitRegistry()
# Queue bridging the MQTT callback (producer) and get_value (consumer)
kafka_data = Queue.Queue()
# Callback functions
# To put corresponding values in queue
# MQTT callbacks: each parses the message payload as a float and pushes it
# onto a per-sensor queue.
# NOTE(review): kitchen_temperature_data, living_room_temperature_data and
# presence_data are not defined anywhere in this module (only kafka_data
# is); these callbacks would raise NameError if ever subscribed — confirm
# whether they are dead code left over from another example.
def callback_kitchen_temp(client, userdata, message):
kitchen_temperature_data.put(float(message.payload))
def callback_living_room_temp(client, userdata, message):
living_room_temperature_data.put(float(message.payload))
def callback_presence(client, data, message):
presence_data.put(float(message.payload))
def callback_kafka(client, data, message):
    """MQTT message callback: enqueue {dotted-topic: payload} on kafka_data.

    The '/' separators in the MQTT topic are replaced with '.' before the
    single-entry dict is pushed onto the module-level kafka_data queue.
    """
    try:
        # str.replace() works on Python 2 and 3; the original used the
        # string.replace() module function, which was removed in Python 3.
        topic = str(message.topic).replace("/", ".")
        kafka_data.put({topic: str(message.payload)})
    except (AttributeError, TypeError) as exc:
        # Best-effort: drop a malformed message, but no longer swallow
        # every exception (the original's bare except hid real errors).
        log.warning("Dropping malformed MQTT message: %s", exc)
# Extract data from Queue
def get_value(queue):
    """Block until an item is available on *queue* and return it.

    Bug fix: the original ignored its *queue* argument and always read the
    module-level kafka_data queue.  The only visible caller supplies this
    function as a liota sampling_function; when invoked with kafka_data the
    behaviour is identical.  NOTE(review): confirm how liota invokes
    sampling_function before relying on the parameter elsewhere.
    """
    data = queue.get(block=True)
    # print() with one argument is valid on Python 2 and 3.
    print("Got data ")
    print(data)
    return data
if __name__ == "__main__":
# Creating EdgeSystem
edge_system = Dk300EdgeSystem(config['EdgeSystemName'])
# Connect with MQTT broker using DeviceComms and subscribe to topics
# Get kitchen and living room temperature values using MQTT channel
# Kafka DCC (data-center component) registered as the sink; the Graphite
# alternative below is intentionally left commented out.
kafka = Kafka(KafkaDccComms(ip = config['KafkaIP'], port = str(config['KafkaPort'])))
# graphite = Graphite(SocketDccComms(ip=config['GraphiteIP'],
# port=int(config['GraphitePort'])))
kafka_reg_edge_system = kafka.register(edge_system)
# graphite_reg_edge_system = graphite.register(edge_system)
mqtt_conn = MqttDeviceComms(url = config['BrokerIP'], port = config['BrokerPort'], identity=None,
tls_conf=None,
qos_details=None,
clean_session=True,
keep_alive=config['keep_alive'], enable_authentication=False)
# Every message on MqttChannel1 is funneled into kafka_data via
# callback_kafka; get_value below then drains that queue as the metric's
# sampling function.
mqtt_conn.subscribe(config['MqttChannel1'],0, callback_kafka)
try:
metric_name = config['MetricName']
content_metric = Metric(
name=metric_name,
unit=None,
interval=1,
aggregation_size=1,
sampling_function=get_value #this is coming from the xmpp device/server
#sampling_function = read_cpu_utilization
#sampling_function = random_fun
)
reg_content_metric = kafka.register(content_metric)
kafka.create_relationship(kafka_reg_edge_system, reg_content_metric)
reg_content_metric.start_collecting()
except RegistrationFailure:
print "Registration to IOTCC failed"
| [
"Kartikeya.Bhatnagar@calsoftinc.com"
] | Kartikeya.Bhatnagar@calsoftinc.com |
00dd467e4af16d74877dc97ac0f1085d681b0a8c | 0b79018d98ca5ae4e4eeae5d7be4bf43381739c4 | /backend/src/scrapper/test.py | b46529b98e1976db4d7152217da18cfa6be877cc | [] | no_license | tsuasian/Dragon-Course-Review | 1560c4da58417eef4e34ce5fa65f2f033800655d | 127e1d3f2fa879b9542eaec54007ce0879aec37c | refs/heads/master | 2023-08-24T15:53:55.246060 | 2021-10-28T08:11:57 | 2021-10-28T08:11:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,434 | py | import requests
import bs4
import lxml
import os
import json as json
base_url = 'https://termmasterschedule.drexel.edu'
home_res = requests.get('https://termmasterschedule.drexel.edu/webtms_du/app')
home_soup = bs4.BeautifulSoup(home_res.text,'lxml')
f = open("course_links.txt","w")
"""
What: Extract classes from the table returned when follow subject link
How: Found a very specific attribute width=99% that only that table elements has.
Select "even" and "odd" classnames and parse their content, ignoring Day/Time column
Return: List of classes for a required subject
"""
def get_rows(soup):
    """Return the class rows from the term-master-schedule results table.

    Finds the results table (identified by its width="99%" attribute),
    collects its .even/.odd striped rows, and keeps only rows with more
    than 5 <td> cells, i.e. real class rows rather than Day/Time sub-rows.

    NOTE(review): like the original, only the first matching table is
    used even when several match.
    """
    tables = soup.find_all('table', attrs={"width": "99%"})
    if not tables:
        # Guard: the original raised IndexError when no table matched.
        return []
    first = tables[0]
    striped_rows = first.select(".even") + first.select(".odd")
    rows = []
    for row in striped_rows:
        if len(row.find_all('td')) > 5:
            rows.append(row)
    return rows
"""
What: Extract links based on selector
How: Search soup object based on selector.
Search result for <a> tags and extract "href" link and tag content.
Assemble a dictionary with tag_content as key and "href" as value pairs
Return: Dictionary with "tag_content": "href" structure
"""
def get_links(soup, selector):
    """Map link text -> href for anchors inside the first *selector* match.

    Returns an empty dict when the selector matches nothing (the
    original raised IndexError in that case).

    NOTE(review): as in the original, anchors are read from the first
    matching element only, even if the selector matches several.
    """
    matches = soup.select(selector)
    if not matches:
        return {}
    links = {}
    for anchor in matches[0].find_all('a', href=True):
        # The anchor's first child node is its display text; href is the
        # (usually relative) target URL.
        links[anchor.contents[0]] = anchor.get('href')
    return links
"""
What: Extract one level down links tree for term -> college links
How: Loops through dictionary and follow links in the values.
Calls to get_links*() to get sublinks.
Assemble nested dictionary with structure:
{
term: {
college : "href"
}
}
Return: One level down nested dictionary
"""
# Follow each term's link and scrape that page's per-college links.
# current_tree: {term_name: relative_href}; returns
# {term_name: {college_name: relative_href}} — one HTTP GET per term.
def term_level_tree(current_tree):
tmp = {}
for term in current_tree:
res = requests.get(base_url + current_tree[term])
college_soup = bs4.BeautifulSoup(res.text,'lxml')
college_links = get_links(college_soup,'#sideLeft')
tmp[term] = college_links
return tmp
"""
What: Extract two level down links tree for term -> college -> subject links
How: Loops through dictionary and follow links in the values.
Calls to get_links*() to get sublinks.
Assemble nested dictionary with structure:
{
term: {
college : {
subject: "href"
}
}
}
Return: Two levels down nested dictionary
"""
# Follow each college link and scrape that page's subject links.
# current_tree: {term: {college: href}}; returns the tree one level
# deeper: {term: {college: {subject: href}}} — one HTTP GET per college.
def college_level_tree(current_tree):
tmp1 = {}
for term in current_tree:
tmp2 = {}
for college in current_tree[term]:
res = requests.get(base_url + current_tree[term][college])
subject_soup = bs4.BeautifulSoup(res.text,'lxml')
subject_links = get_links(subject_soup,'.collegePanel')
tmp2[college] = subject_links
tmp1[term] = tmp2
return tmp1
"""
What: Extract string representative of a row of class information from three(final) levels down links tree for term -> college -> subject -> class
For testing purposes print() instead of writing to file.
How: Loops through dictionary and follow links in the values.
Calls to get_links*() to get sublinks.
When reached the bottom level, search table data for values and assemble a string.
For testing output is redirected to out.txt file
Return:
"""
def subject_level_tree_print(current_tree):
    """Fetch every subject page and dump one '|'-separated line per class.

    Walks the {term: {college: {subject: href}}} tree, scrapes each
    subject page's class rows via get_rows(), and writes each assembled
    row both to 'a.out' and to stdout.
    """
    # 'with' guarantees the file is closed even if an HTTP call raises;
    # the original leaked the handle on error.
    with open("a.out", "w") as out:
        for term in current_tree:
            for college in current_tree[term]:
                for subject in current_tree[term][college]:
                    res = requests.get(base_url + current_tree[term][college][subject])
                    class_soup = bs4.BeautifulSoup(res.text, 'lxml')
                    for row in get_rows(class_soup):
                        write_string = ""
                        for cell in row:
                            try:
                                # Single-content cells hold the data value.
                                if len(cell.contents) == 1:
                                    write_string = write_string + "|" + cell.contents[0]
                            except (AttributeError, TypeError):
                                # Bare text nodes have no .contents, and
                                # non-string content cannot be concatenated;
                                # skip both, as the original's bare except did.
                                pass
                        out.write(write_string + "\n")
                        print(write_string)
# Top-level driver: dump term links to course_links.txt (via the
# module-level handle f), then scrape the hard-coded Winter 20-21 term
# down to individual classes.
def main():
term_links = get_links(home_soup,'.termPanel')
for l in term_links:
write_string = l + ": " + term_links[l] + "\n"
f.write(write_string)
f.close()
#print(term_links)
# NOTE(review): the term is hard-coded; a missing 'Winter Quarter 20-21'
# key would raise KeyError when the site rolls over to a new term.
winter_link_tree = {}
winter_link_tree['Winter Quarter 20-21'] = term_links['Winter Quarter 20-21']
term_college_tree = term_level_tree(winter_link_tree)
print(term_college_tree)
college_subject_tree = college_level_tree(term_college_tree)
subject_level_tree_print(college_subject_tree)
main()
| [
"tchang@gmail.com"
] | tchang@gmail.com |
1b6205e42264a320af0acc5001e8365d4e80aa70 | bda9a317e22707e51e1f78f4ffca8205750f6d95 | /mapbox/app/app/app.py | 0e53e92e233cfc15117da47a1e4a8d72a0cba2c2 | [] | no_license | elroypeter/SeniorProject | 0395e8ffc977ea0f917a8525b5b85ca696fcca19 | 238b8e3c091b0294f620e6db68e897d8b8598ec3 | refs/heads/master | 2020-04-05T11:17:04.943758 | 2018-11-05T20:09:43 | 2018-11-05T20:09:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from flask import Flask, render_template
from werkzeug import secure_filename
from app import app, db, models
import os, sys, requests
# Flask view functions.  The map view pulls its GeoJSON-ish payload from
# the 'dblayer' service; the remaining routes render static templates.
@app.route('/',methods=['GET','POST'])
def my_maps():
# TODO: need to change this to be dynamic
response = requests.get('http://dblayer:80/jsonData/test.json')
return render_template('index.html', data = response.json())
@app.route('/recommendations', methods=['GET','POST'])
def recommendation():
return render_template('recommendations.html')
@app.route('/urgent', methods=['GET','POST'])
def urgent():
return render_template('urgent.html')
@app.route('/admin', methods=['GET','POST'])
def admin():
return render_template('admin.html')
| [
"j.a.cochran.cs@gmail.com"
] | j.a.cochran.cs@gmail.com |
cdfb76942b472660a93bac1e3dc180069042129d | 2dc7cee4cde492d6a7bbe253e95a7b9e9601cc59 | /config.py | 2fb86f6e715e6622bdd67b4132c7c8812a46e1a4 | [] | no_license | qylshy/myproject | 5a7db09553b6e4d4fa21cdb08afc4ee38b6fdefb | 6c0f4f6b5d98df8e192f64e2390f934ced9ffacb | refs/heads/master | 2020-05-28T11:08:54.043512 | 2019-05-28T07:46:08 | 2019-05-28T07:46:08 | 188,979,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
# Flask application settings; each value prefers an environment variable
# and falls back to a development default.
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
# TLS is enabled by the mere presence of MAIL_USE_TLS, whatever its value.
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
# Error reports are mailed to these addresses.
ADMINS = ['929130707@qq.com']
POSTS_PER_PAGE = 3
| [
"qiuyunlong@bytedance.com"
] | qiuyunlong@bytedance.com |
b2f8b6a8be09dcc36a04d64d3071afeccb3142cd | e683d9ec39667ee5a935551ec086ed5afd48f962 | /mapper.py | 90d9f7e375808e3fae8bd7cfe526e0a192905552 | [
"MIT"
] | permissive | tompollard/dorian | 08b3890acde74dfa226da8cea9a4870265cd12b4 | e9b7276c8a59ba596278a5379f69dc8ce027f3bc | refs/heads/master | 2020-12-24T14:37:05.419666 | 2015-03-24T19:09:50 | 2015-03-24T19:09:50 | 32,338,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | #!/usr/bin/env python
import sys
import re
def mapper(stream):
    """Hadoop-streaming style word-count mapper.

    For every line of *stream*, emit '<word>\t1' on stdout for each word
    (letters followed by alphanumerics), lower-cased.  Rewritten with
    sys.stdout.write so the output is byte-identical on Python 2 and 3;
    the original bare print statement is Python-2-only.
    """
    pattern = re.compile('[a-zA-Z][a-zA-Z0-9]*')
    for line in stream:
        for word in pattern.findall(line):
            sys.stdout.write(word.lower() + '\t' + '1\n')
mapper(sys.stdin) | [
"tom.pollard.11@ucl.ac.uk"
] | tom.pollard.11@ucl.ac.uk |
aa5ce4f7642dc7712a7bdca6f0c2ed9b99d4fdac | d2a9ff16cdbcc97a65ae01cdcd79be4d560ef7c9 | /homework5/db.py | 7d644cfc342b43a270440041a3cc25410afcffe5 | [] | no_license | humantom88/geekbrains-data-scraping | af3d7231f83ceb86992f01956b9d2184aa560b28 | b02d68788be8a4d500b5433ec6a89f5583864061 | refs/heads/master | 2022-11-06T10:14:25.515797 | 2020-06-28T14:08:54 | 2020-06-28T14:08:54 | 268,997,676 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | from pymongo import MongoClient, errors
from pprint import pprint
import zlib
client = MongoClient('localhost', 27017)
db = client['mails_db']
mails_db = db.mails
db2 = client['goods_db']
goods_db = db2.goods
def make_hash(item):
    """Deterministic Adler-32 checksum of *item*'s repr().

    Used below as a content-derived Mongo _id so identical documents
    collide on insert and can be skipped as duplicates.
    """
    return zlib.adler32(repr(item).encode('utf-8'))
def save_mails_to_db(mails_list):
    """Insert each mail dict into the mails collection, skipping duplicates.

    Each document's _id is set (in place, on the caller's dict) to the
    Adler-32 hash of its repr, so re-inserting the same content raises
    DuplicateKeyError, which is reported and skipped.
    """
    for mail in mails_list:
        mail["_id"] = make_hash(mail)
        try:
            mails_db.insert_one(mail)
        except errors.DuplicateKeyError:
            print("Duplicate found for mail: ", mail)
def save_goods_to_db(goods_list):
    """Insert each good dict into the goods collection, skipping duplicates.

    Mirrors save_mails_to_db: the _id is the content hash of the
    document, so duplicates raise DuplicateKeyError and are skipped.
    """
    for good in goods_list:
        good["_id"] = make_hash(good)
        try:
            goods_db.insert_one(good)
        except errors.DuplicateKeyError:
            print("Duplicate found for good: ", good)
pass | [
"humantom88@gmail.com"
] | humantom88@gmail.com |
ac68c34a9df77b38ee0be71b8c371854aa47da18 | 142fd48d2c09bc83ba31b96553fc6d27fad596a3 | /v1/202.happy-number.132775164.ac.py | 76ae51a59bdadd727573d185296fe6de77a038ba | [] | no_license | goalong/lc | baaa8ecc55ecdb136271687d21609832f32ccf6e | 7b45d500e65c759cc2e278d33d9d21925a713017 | refs/heads/master | 2021-10-28T03:40:23.534592 | 2019-04-21T14:29:47 | 2019-04-21T14:29:47 | 111,088,996 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | #
# [202] Happy Number
#
# https://leetcode.com/problems/happy-number/description/
#
# algorithms
# Easy (41.39%)
# Total Accepted: 153.8K
# Total Submissions: 371.6K
# Testcase Example: '1'
#
# Write an algorithm to determine if a number is "happy".
#
# A happy number is a number defined by the following process: Starting with
# any positive integer, replace the number by the sum of the squares of its
# digits, and repeat the process until the number equals 1 (where it will
# stay), or it loops endlessly in a cycle which does not include 1. Those
# numbers for which this process ends in 1 are happy numbers.
#
# Example: 19 is a happy number
#
#
# 12 + 92 = 82
# 82 + 22 = 68
# 62 + 82 = 100
# 12 + 02 + 02 = 1
#
#
# Credits:Special thanks to @mithmatt and @ts for adding this problem and
# creating all test cases.
#
class Solution(object):
    def isHappy(self, n):
        """Return True iff n is a happy number (LeetCode 202).

        Repeatedly replace n by the sum of the squares of its digits; a
        happy number reaches 1, an unhappy one enters a cycle.  The seen
        set detects the cycle and guarantees termination.

        :type n: int
        :rtype: bool
        """
        seen = set()
        while n not in seen:
            seen.add(n)
            n = self.get_next(n)
            if n == 1:
                return True
        # Revisited a value without hitting 1: we are in a cycle.
        return False

    def get_next(self, num):
        """Sum of the squares of num's decimal digits."""
        return sum(int(digit) ** 2 for digit in str(num))
| [
"along@myw-vicdeiMac.local"
] | along@myw-vicdeiMac.local |
522f7b5624afc3f1cd74452502167aa8d9f5b6d9 | 14be624679f0bd4521989f26263bf1803b2afba5 | /Python3/URI1041.py | ad2c32cc607832a0ec4655cd4e04c6b9a0005934 | [] | no_license | axelaviloff/uri-solutions | 2521e792bbedba23a8320ced3e9c05bf5af8f7e0 | c51b1122b608d65298cff26f3c1ad87ec059e2d0 | refs/heads/master | 2023-05-13T22:17:39.323316 | 2020-10-27T12:37:21 | 2020-10-27T12:37:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | coordenadas = input()
def quadrant(x, y):
    """Classify point (x, y) for URI problem 1041.

    Returns 'Q1'..'Q4' for interior quadrant points, 'Eixo Y' / 'Eixo X'
    for points on an axis, and 'Origem' for the origin.
    """
    if x > 0 and y > 0:
        return "Q1"
    if x > 0 and y < 0:
        return "Q4"
    if x < 0 and y < 0:
        return "Q3"
    if x < 0 and y > 0:
        return "Q2"
    if x == 0 and y != 0:
        return "Eixo Y"
    if y == 0 and x != 0:
        return "Eixo X"
    return "Origem"


if __name__ == "__main__":
    # The guard keeps the module importable; 'coordenadas' is the line
    # already read by the statement above this block.
    x_str, y_str = coordenadas.split(" ")
    print(quadrant(float(x_str), float(y_str)))
"axel.aviloff@estudante.uffs.edu.br"
] | axel.aviloff@estudante.uffs.edu.br |
18757dc913ff5692065e3d0722d1a414217f341e | ad668acbbbf321db2dcbf2cc5a330387df814531 | /MyEscapades/coordinateCombat.py | e1eaa6b57bfdcf3a8849cd3e205a35842bc8bf56 | [] | no_license | LIHTU/mc_python_files | 65969323866dd87bde3ddc97d47dc2dce7e6642e | d0408eea3adf59249ba0742e4c9101a42eb8e6c2 | refs/heads/master | 2020-12-24T13:16:54.389643 | 2016-03-20T22:00:15 | 2016-03-20T22:00:15 | 35,831,326 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # Coordinate Combat
# In this minigame the user will be prompted to
# defend themselves against mobs by entering coordinates
# for defensive structures and weapons, such as fire or
# trap pits, or TNT.
'''
1. figure out if we can turn mobs on, and make them hostile.
2. learn and implement chat commands.
3. Design minigame and sequencing.
where
which mobs
4. Can we detect whether a mob entity is dead or alive?
5. Maybe we could simulate a mob with moving block monsters, like
the ufo in adventure 8.
'''
| [
"robinanelson@gmail.com"
] | robinanelson@gmail.com |
389ce0bd3e07869ffa7d5d82fc97f0e6114b317e | 1740075fca5d99eee47d8ab10e918be07f544d55 | /catalog/migrations/0002_auto_20191107_1239.py | bd20f9edcd6d8711f45f088ad0c948df3acd2e3a | [] | no_license | Grayw0lf/local_library | 0933bd5d35ef64ee4dc90dd0cdd83686a8eeed3a | 652f0260bfd153138eaee24810685c52f4063b07 | refs/heads/master | 2023-04-30T10:23:38.048841 | 2019-11-13T21:10:09 | 2019-11-13T21:10:09 | 221,551,305 | 1 | 0 | null | 2023-04-21T20:40:05 | 2019-11-13T21:03:49 | Python | UTF-8 | Python | false | false | 688 | py | # Generated by Django 2.2.7 on 2019-11-07 09:39
from django.db import migrations, models
import uuid
# Auto-generated Django migration: renames Author.date_of_died to
# date_of_death and alters BookInstance.id.
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='author',
old_name='date_of_died',
new_name='date_of_death',
),
migrations.AlterField(
model_name='bookinstance',
name='id',
# NOTE(review): the default is a *literal* UUID, which suggests the
# model used an evaluated uuid.uuid4() instead of passing the callable;
# every new row would share this default — confirm the model uses
# default=uuid.uuid4 (no parentheses).
field=models.UUIDField(default=uuid.UUID('976d9b8b-7c2f-4e07-9879-78d7f1d2fe11'), help_text='Unique ID for this particular book across whole library', primary_key=True, serialize=False),
),
]
| [
"akosheev@rambler.ru"
] | akosheev@rambler.ru |
79477d7ab3de33f495c52b4c124955dd2490a742 | 5b6ff2aaad93717f68ec9babbee59234536cb6a4 | /AddField.py | 621a27f3ab062cceb94485888e8db269cb33ec7e | [] | no_license | byrash/py | 46db32a29a2fffe1f2c854fd09b3451ee6b5b98d | 8d532585b015d6304dcca3ccda6d82c18f2f57ac | refs/heads/master | 2021-03-22T05:20:55.517051 | 2017-10-25T05:41:25 | 2017-10-25T05:41:25 | 107,758,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 13:12:34 2017
@author: Shivaji
"""
import os
import glob
import pandas
def addField(indir="/Users/Shivaji/tmp/extracted"):
    """Append a 'Station' column to every whitespace-delimited file in *indir*.

    For each file, the station name is everything before the final '-' in
    the file name; the result is written beside the input as '<name>.csv'
    with neither header nor index.

    NOTE(review): os.chdir changes the process-wide working directory as a
    side effect, exactly as the original did.
    """
    os.chdir(indir)
    for file_name in glob.glob("*"):
        # Raw string avoids the '\s' invalid-escape warning on Python 3.
        df = pandas.read_csv(file_name, sep=r'\s+', header=None)
        station = file_name.rsplit("-", 1)[0]
        df["Station"] = [station] * df.shape[0]
        # header=False / index=False are the documented spellings of the
        # legacy None arguments.
        df.to_csv(file_name + ".csv", index=False, header=False)
| [
"shivaji.byrapaneni@gmail.com"
] | shivaji.byrapaneni@gmail.com |
3fe99be4fc9dd1b4475b5099b5f1a26acdddbb8d | f04eed5e6c4499d22fb8e339667267aa59c8dfc7 | /MonteCarloSimDraw8balls.py | a5de8664cb39a1bcd8e359a74ce2f9906b98f320 | [
"Giftware"
] | permissive | shanjgit/previous-work | 4ca7e29e231498891752307ba4b04c9726f0eb67 | 664cc40bd0b97e3adc10f551e18a4a7a62e5a760 | refs/heads/master | 2021-01-19T20:02:57.744302 | 2017-08-24T17:57:55 | 2017-08-24T17:57:55 | 101,217,213 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import random
def drawing_without_replacement_sim(numTrials):
    '''
    Runs numTrials trials of a Monte Carlo simulation
    of drawing 3 balls out of a bucket containing
    4 red and 4 green balls. Balls are not replaced once
    drawn. Returns a float - the fraction of times 3
    balls of the same color were drawn in the first 3 draws.

    Fixed for Python 3: the original used xrange, which raises
    NameError there.  random.sample draws without replacement in one
    call, matching the original choice-and-remove loop's distribution.
    '''
    successes = 0
    for _ in range(numTrials):
        # 0 = red, 1 = green; all-same-colour iff the draw sums to 0 or 3.
        draw = random.sample([0, 0, 0, 0, 1, 1, 1, 1], 3)
        if sum(draw) in (0, 3):
            successes += 1
    return successes / float(numTrials)
"noreply@github.com"
] | noreply@github.com |
1e2657793c46ff5d10baa5dde973f110c7e4b49e | 7b9a792ab61f1308577b0abf38283908ae65ca92 | /config.py | 3e0cf778b1427b4d92af6607c46b8bdabad77d89 | [
"MIT"
] | permissive | amoslgl96/micro_app | ae8543f02d8cd7e80a9a4df9ac8f150a4b088e4e | f300011fe964d6c9f49da24e5e212f546972529e | refs/heads/master | 2021-05-05T16:36:09.714685 | 2018-01-13T18:27:09 | 2018-01-13T18:27:09 | 117,356,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
# Flasky-style layered configuration: a shared base plus one subclass per
# environment, selected through the 'config' mapping at the bottom.
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_POSTS_PER_PAGE = 20
FLASKY_FOLLOWERS_PER_PAGE = 50
FLASKY_COMMENTS_PER_PAGE = 30
# Hook for environment-specific app setup; the base does nothing.
@staticmethod
def init_app(app):
pass
# Each environment picks its own database URL, falling back to a
# per-environment SQLite file next to this module.
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Registry consumed by the application factory (create_app(config_name)).
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| [
"baolgl96@gmail.com"
] | baolgl96@gmail.com |
8b29ce161cc0486a4b357fc0b7e9a4eff0014b1b | 636e304830d60907c778634df346a42399631e7d | /webots-project/controllers/pos-prediction/predictors/predictor_NN.py | 3c6cc3f45a5ebf2afc72941669fe0e87e7a0dd94 | [
"MIT"
] | permissive | varun-projects/webots-thesis | 8784807b42a35dbe00040c3f903cdd4b86251338 | c18c53b281af6c68431b9b3abde07d1934c37dd9 | refs/heads/master | 2023-01-14T12:16:37.984530 | 2020-10-06T14:00:26 | 2020-10-06T14:00:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | from keras import models
from keras import layers
import numpy as np
import math
import pickle
import os.path
from keras.models import load_model
# Keras regression model mapping a robot pose (x, y, theta) to its 8
# expected distance-sensor readings; used by a particle-filter-style
# localizer to score pose hypotheses.
class PredictorNN:
def __init__(self, data_collector):
# Fraction of data reserved for training (unused in this excerpt).
self.percentageTraining = .8
self.dc = data_collector
data = self.dc.get_data_frame()
# delete NA examples
data = data.dropna()
# if model exists load it otherwise create it
# NOTE(review): when the .h5 file is absent, inputs/output/model and the
# min/max attributes are never set — prediction_error would then fail.
if os.path.isfile('train_data_model_NN.h5'):
self.inputs = data[['x', 'y', 'theta']]
self.output = data[['sensor_1', 'sensor_2', 'sensor_3', 'sensor_4', 'sensor_5', 'sensor_6', 'sensor_7', 'sensor_8']]
self.model = load_model('train_data_model_NN.h5')
# Column-wise extrema used for min-max (de)normalization below.
self.inputs_max = self.inputs.max()
self.inputs_min = self.inputs.min()
self.output_max = self.output.max()
self.output_min = self.output.min()
# Train a fresh network, persist it plus its training history/scores as
# pickles, and return the fitted model.
def create_model(self, train_data, train_targets, test_data, test_targets):
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
all_mae_histories = []
model = self.build_model(train_data)
history = model.fit(train_data, train_targets, epochs=num_epochs, batch_size=1)
model.save('train_data_model_NN.h5')
f = open('history.pckl', 'wb')
pickle.dump(history, f)
f.close()
mae_history = history.history['mean_absolute_error']
val_mse, val_mae = model.evaluate(test_data, test_targets)
all_scores.append(val_mae)
all_mae_histories.append(mae_history)
print('Scores of the k-fold', all_scores)
print('Saving all scores and mae histories in local files')
# save
f = open('all_scores.pckl', 'wb')
pickle.dump(all_scores, f)
f.close()
f = open('all_mae_histories.pckl', 'wb')
pickle.dump(all_mae_histories, f)
f.close()
return model
# Standardize both sets in place using the *training* mean/std.
def normalize_data(self, train_data, test_data):
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
# Two hidden ReLU layers of 64 units; 8 linear outputs, one per sensor.
def build_model(self, train_data):
model = models.Sequential()
model.add(layers.Dense(64, activation='relu',
input_shape=(train_data.shape[1],)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(8))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
# Min-max scale inputs into [0, 1] / invert that scaling for outputs.
def normalize_inputs(self, inputs):
return (inputs - self.inputs_min)/(self.inputs_max - self.inputs_min)
def denormalize_output(self, outputs):
return outputs*(self.output_max - self.output_min)+self.output_min
# Score pose (x, y, theta) against measured *sensors*: returns
# (1 / sum-of-squared-errors, bad_data) where bad_data is True iff every
# measured reading was NaN.  NOTE(review): err stays 0 when all readings
# are NaN, so 1/err would raise ZeroDivisionError — callers presumably
# check bad_data first; confirm.
def prediction_error(self, x, y, theta, sensors):
features = self.normalize_inputs(np.array([x, y, theta]))
pre_sensors = self.denormalize_output(self.model.predict(np.array([features]))[0])
err = 0
n_sensors = len(sensors)
bad_data = True
# print(true_dist)
for ix, elem in enumerate(pre_sensors):
if not math.isnan(sensors[ix]):
bad_data = False
# print('err', elem)
# print('true', true_dist[ix])
err += (elem - sensors[ix]) ** 2
return 1/err, bad_data
| [
"joan.sebastian.gerard@gmail.com"
] | joan.sebastian.gerard@gmail.com |
696c4f4e21c3eb7fe7ea3890ef830d042a91d421 | c2c86157ae2a4031d79b6f71b3f3cdcad913a87d | /matlabfiles/.svn/text-base/runonebyone.py.svn-base | cad7cd2195024b4366b6e4d261ec8176d6c025ed | [] | no_license | fishdda/Automatic-Radiation-Treatment-Planning-System- | e3875e7d17e96e488c7d678d70da4411213a98c6 | 42eba3a27e62e53907c782f01585abb0de15d7e4 | refs/heads/master | 2020-05-16T16:58:28.804199 | 2014-10-04T16:05:44 | 2014-10-04T16:05:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | import os;
# Run the PrioritizedNlp solver once per case file, sequentially.
cases = [5, 25, 104, 108, 113, 123, 208, 30, 79]
for case_id in cases:
    cmd = './PrioritizedNlp cases' + str(case_id) + '.txt'
    # print() with one argument behaves the same on Python 2 and 3;
    # the original bare print statement was py2-only.
    print(cmd)
    os.system(cmd)
| [
"parastiwari@Paras-Tiwaris-MacBook-Pro.local"
] | parastiwari@Paras-Tiwaris-MacBook-Pro.local | |
2c8f96dfd60e771a4512c4b9b459a21ff197f9ae | e04c3af194afacf7e454eb63a1f917c0df46698d | /MAST/test/workflow_test/workflow_setup.py | 4fecd61563ecda63785f435c50709a593de50be3 | [
"MIT"
] | permissive | kcantosh/MAST | 050716de2580fe53cf241b0d281a84f13175b542 | 4138b87e5a1038eb65023232f80907333d3196f2 | refs/heads/dev | 2021-01-20T16:51:22.759949 | 2017-01-31T16:40:45 | 2017-01-31T16:40:45 | 82,833,665 | 0 | 1 | null | 2017-02-22T17:34:13 | 2017-02-22T17:34:13 | null | UTF-8 | Python | false | false | 4,053 | py | ##############################################################
# This code is part of the MAterials Simulation Toolkit (MAST)
#
# Maintainer: Tam Mayeshiba
# Last updated: 2016-02-08
##############################################################
##############################################################
# Requirements:
# 1. Home directory access from where the test will be run
# 2. MAST installation
##############################################################
import os
import time
import shutil
import numpy as np
from MAST.utility import MASTError
from MAST.utility import dirutil
from MAST.utility import MASTFile
import MAST
import subprocess
testname ="workflow_test"
testdir = dirutil.get_test_dir(testname)
checkname = os.path.join(testdir, "WORKFLOW_CONFIG")
# Read the WORKFLOW_CONFIG file and raise MASTError unless every line
# containing "Check" answers 'y' after its colon.
# NOTE(review): a "Check" line with nothing after ':' would raise
# IndexError from .strip()[0]; the bare 'print' statements are Python 2.
def verify_checks():
checkfile=MASTFile(checkname)
for myline in checkfile.data:
if "Check" in myline:
checkresult = myline.split(":")[1].strip()[0].lower()
if checkresult == 'y':
print "Checks okay"
else:
raise MASTError("verify checks","Checks for workflow setup not verified. Check %s" % checkname)
return
# Parse "workflow_* = value" lines of WORKFLOW_CONFIG into a dict,
# first re-validating the checks above.
def get_variables():
verify_checks()
myvars=dict()
checkfile=MASTFile(checkname)
for myline in checkfile.data:
if myline[0:9] == "workflow_":
mykey = myline.split("=")[0].strip()
myval = myline.split("=")[1].strip()
myvars[mykey] = myval
return myvars
# Build a timestamped copy of the mini MAST tree plus a one-line bash
# submission script for *inputfile*; returns [test_dir, script, output].
def create_workflow_test_script(inputfile):
myvars = get_variables()
# set up testing directory tree
wtdir=myvars['workflow_test_directory']
mast_test_dir=os.path.join(wtdir,"no_directory_yet")
# Loop until a unique timestamped directory is created (copytree fails
# rather than overwrites if the name already exists).
while not (os.path.isdir(mast_test_dir)):
timestamp=time.strftime("%Y%m%dT%H%M%S")
mast_test_dir = os.path.join(wtdir,"output_test_%s" % timestamp)
if not (os.path.isdir(mast_test_dir)):
shutil.copytree("%s/mini_mast_tree" % wtdir, mast_test_dir)
# set up output file and submission script
shortname = inputfile.split(".")[0]
output="%s/output_%s" % (wtdir, shortname)
submitscript="%s/submit_%s.sh" % (wtdir, shortname)
generic_script="%s/generic_mast_workflow.sh" % wtdir
bashcommand="bash %s %s %s %s %s %s >> %s" % (generic_script,
mast_test_dir,
myvars["workflow_examples_located"],
inputfile,
myvars["workflow_activate_command"],
myvars["workflow_testing_environment"],
output)
submitfile=MASTFile()
submitfile.data.append(bashcommand + "\n")
submitfile.to_file(submitscript)
return [mast_test_dir, submitscript, output]
# Run the generated script and poll its output (up to ~4 hours, 30 s
# intervals) for "Workflow completed"; returns ["Completed"|"Unfinished",
# test_dir].  The bare 'print' statements are Python 2 syntax.
def generic_submit(inputfile):
[mast_test_dir, submitscript, outputname] = create_workflow_test_script(inputfile)
mygsub = "bash %s" % submitscript
gproc = subprocess.Popen(mygsub, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gproc.wait()
if not (os.path.isfile(outputname)):
print "Sleep 5"
time.sleep(5)
if not (os.path.isfile(outputname)):
raise OSError("Test did not create output %s" % outputname)
print "Output %s created" % outputname
waitct=0
tailcmd = "tail -n 3 %s" % outputname
maxwait=502
while waitct < maxwait:
tail3proc=subprocess.Popen(tailcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tail3=tail3proc.communicate()[0]
tail3proc.wait()
for tailline in tail3.split("\n"):
if "Workflow completed" in tailline:
return ["Completed", mast_test_dir]
time.sleep(30)
waitct = waitct + 1
print "Output not complete. Attempt %i/%i" % (waitct, maxwait)
return ["Unfinished", mast_test_dir]
def get_finished_recipe_dir(mast_test_dir):
    """Return the first directory found under <mast_test_dir>/ARCHIVE.

    Non-directory entries are skipped; returns "" when ARCHIVE holds no
    subdirectory.  Which directory is "first" follows os.listdir order,
    as in the original.
    """
    archive = os.path.join(mast_test_dir, "ARCHIVE")
    for entry in os.listdir(archive):
        candidate = os.path.join(archive, entry)
        if os.path.isdir(candidate):
            return candidate
    return ""
| [
"mayeshiba@wisc.edu"
] | mayeshiba@wisc.edu |
4e24c93448376bf4ec8685ec08596212224928a2 | 45c13e4f2204c711c8ddb619a423fa2802df93fc | /blogspace/articles/migrations/0003_article_author.py | 007e5bb909ab9152f422c1cd19343ec87b121f7c | [] | no_license | darklongnightt/blogspace | f7d287cf91e1900893676e4815078e41e7fc4f7f | ba67b4f9a621c5ac4a8cf5c016bdbf0617b40f00 | refs/heads/master | 2020-11-30T10:21:40.397046 | 2019-12-28T14:18:37 | 2019-12-28T14:18:37 | 230,376,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # Generated by Django 3.0.1 on 2019-12-28 11:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: adds Article.author as a FK to the
# project's user model, cascading deletes.
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('articles', '0002_article_thumbnail'),
]
operations = [
migrations.AddField(
model_name='article',
name='author',
# NOTE(review): default=None on a non-nullable FK fails if any Article
# rows already exist when the migration runs — confirm the table was
# empty at migration time.
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"toki.1243@gmail.com"
] | toki.1243@gmail.com |
61e7ac4e48aa441ddac70c7a136199bc95ef0cb8 | 97be97cfc56fb2170b60b91063dbfe5f1449e3c0 | /python/ABC189/D.py | 7e2f46bfedc29c348c5d23cf98f1faf6718dbc94 | [] | no_license | iWonder118/atcoder | 73d965a0a9ade189733808e47634f2b7776aad4b | 3ab7271e838a2903ff0e07f94015ef13c59577e1 | refs/heads/master | 2022-01-25T10:10:55.007340 | 2021-12-31T14:04:54 | 2021-12-31T14:04:54 | 245,155,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | n = int(input())
logics = [input() for _ in range(n)]
for i in range(n):
| [
"52240372+iWonder118@users.noreply.github.com"
] | 52240372+iWonder118@users.noreply.github.com |
965ccfbb787575189bbd405fdd1a466953457af5 | ee7596f2efcf9abf86ff312bb1f0a0f963e7787d | /chunking/main.py | ce4637ab17240a729280d041ed31f638b89aeb96 | [] | no_license | the-league-of-legends/chunk | 8edfb906fbfba0e291bd6adebcf95831d0491d71 | b8d6327c2ebd4a4a41c94d4fb4322c15ff039f4d | refs/heads/master | 2020-06-20T02:59:59.629299 | 2019-07-15T09:34:25 | 2019-07-15T09:34:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,936 | py | # -*-encoding=utf8-*-
import json
import codecs
import itertools
from collections import OrderedDict
import os
import sys
from itertools import chain
import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.crf import viterbi_decode
from tensorflow.contrib.layers.python.layers import initializers
currentPath = os.getcwd()
sys.path.append(currentPath)
import jieba
import jieba.posseg as pseg
root_path = os.getcwd()
global pyversion
if sys.version > '3':
pyversion = 'three'
else:
pyversion = 'two'
if pyversion == 'three':
import pickle
else:
import cPickle, pickle
root_path = os.getcwd() + os.sep
CONFIG = {
}
class Model(object):
# 初始化模型参数
def __init__(self, config):
# Build the whole TF1 computation graph for the chunking tagger:
# embeddings -> (IDCNN) -> projection -> CRF loss -> clipped optimizer.
self.config = config
self.lr = config["lr"]
self.char_dim = config["char_dim"]
self.lstm_dim = config["lstm_dim"]
self.seg_dim = config["seg_dim"]
self.num_tags = config["num_tags"]
self.num_chars = config["num_chars"] # total number of characters in the training vocabulary
self.num_segs = 4
self.global_step = tf.Variable(0, trainable=False)
self.best_dev_f1 = tf.Variable(0.0, trainable=False)
self.best_test_f1 = tf.Variable(0.0, trainable=False)
self.initializer = initializers.xavier_initializer()
# Placeholders: [batch, time] id matrices for characters, segmentation
# features and gold tags.
self.char_inputs = tf.placeholder(dtype=tf.int32,
shape=[None, None],
name="ChatInputs")
self.seg_inputs = tf.placeholder(dtype=tf.int32,
shape=[None, None],
name="SegInputs")
self.targets = tf.placeholder(dtype=tf.int32,
shape=[None, None],
name="Targets")
# dropout keep prob
self.dropout = tf.placeholder(dtype=tf.float32,
name="Dropout")
# Real sequence lengths: count of non-zero (non-padding) char ids.
used = tf.sign(tf.abs(self.char_inputs))
length = tf.reduce_sum(used, reduction_indices=1)
self.lengths = tf.cast(length, tf.int32)
self.batch_size = tf.shape(self.char_inputs)[0]
self.num_steps = tf.shape(self.char_inputs)[-1]
self.model_type = config['model_type']
# Dilation schedule for one IDCNN stack (repeated repeat_times times).
self.layers = [
{
'dilation': 1
},
{
'dilation': 1
},
{
'dilation': 2
},
]
self.filter_width = 3
self.num_filter = self.lstm_dim
self.embedding_dim = self.char_dim + self.seg_dim
self.repeat_times = 4
self.cnn_output_width = 0
embedding = self.embedding_layer(self.char_inputs, self.seg_inputs, config)
# Only the 'idcnn' model type is implemented in this file.
if self.model_type == 'idcnn':
model_inputs = tf.nn.dropout(embedding, self.dropout)
model_outputs = self.IDCNN_layer(model_inputs)
self.logits = self.project_layer_idcnn(model_outputs)
else:
raise KeyError
self.loss = self.loss_layer(self.logits, self.lengths)
with tf.variable_scope("optimizer"):
optimizer = self.config["optimizer"]
if optimizer == "sgd":
self.opt = tf.train.GradientDescentOptimizer(self.lr)
elif optimizer == "adam":
self.opt = tf.train.AdamOptimizer(self.lr)
elif optimizer == "adgrad":
self.opt = tf.train.AdagradOptimizer(self.lr)
else:
raise KeyError
# Gradient clipping to [-clip, clip] before applying updates.
grads_vars = self.opt.compute_gradients(self.loss)
capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
for g, v in grads_vars]
self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
    def embedding_layer(self, char_inputs, seg_inputs, config, name=None):
        """Look up character and (optionally) segmentation embeddings.

        Args:
            char_inputs: int32 tensor [batch, num_steps] of character ids.
            seg_inputs: int32 tensor [batch, num_steps] of segmentation
                feature ids (0..num_segs-1); only used when config["seg_dim"]
                is non-zero.
            config: dict-like model configuration; read for "seg_dim".
            name: optional variable-scope name (defaults to "char_embedding").

        Returns:
            Float tensor [batch, num_steps, char_dim (+ seg_dim)] — the
            character embedding, concatenated with the segmentation
            embedding when enabled.
        """
        embedding = []
        # Stashed on self for inspection/debugging in tests.
        self.char_inputs_test = char_inputs
        self.seg_inputs_test = seg_inputs
        with tf.variable_scope("char_embedding" if not name else name):
            self.char_lookup = tf.get_variable(
                name="char_embedding",
                shape=[self.num_chars, self.char_dim],
                initializer=self.initializer)
            embedding.append(tf.nn.embedding_lookup(self.char_lookup, char_inputs))
            # Segmentation features are optional; controlled by config.
            if config["seg_dim"]:
                with tf.variable_scope("seg_embedding"):
                    self.seg_lookup = tf.get_variable(
                        name="seg_embedding",
                        # shape=[4*20]
                        shape=[self.num_segs, self.seg_dim],
                        initializer=self.initializer)
                    embedding.append(tf.nn.embedding_lookup(self.seg_lookup, seg_inputs))
            # Concatenate along the feature axis -> embedding_dim features.
            embed = tf.concat(embedding, axis=-1)
        # Debug handles, not used by the forward pass.
        self.embed_test = embed
        self.embedding_test = embedding
        return embed
    def IDCNN_layer(self, model_inputs,
                    name=None):
        """Iterated Dilated CNN encoder over the embedded sequence.

        Applies an initial 1xK convolution, then repeats the configured
        stack of atrous (dilated) convolutions ``self.repeat_times`` times
        with shared weights (reuse), collecting the output of the last
        layer of every repetition.

        Args:
            model_inputs: float tensor [batch, num_steps, embedding_dim].
            name: optional variable-scope name (defaults to "idcnn").

        Returns:
            Float tensor [batch * num_steps, cnn_output_width]; also sets
            ``self.cnn_output_width`` as a side effect.
        """
        # conv2d needs 4-D input: treat the sequence as a 1-pixel-high image.
        model_inputs = tf.expand_dims(model_inputs, 1)
        self.model_inputs_test = model_inputs
        reuse = False
        # NOTE(review): ``self.dropout`` is a tf.placeholder, and under TF1
        # ``placeholder == 1.0`` compares object identity, so this condition
        # is presumably always False — confirm the intended "reuse at
        # inference time" behaviour.
        if self.dropout == 1.0:
            reuse = True
        with tf.variable_scope("idcnn" if not name else name):
            # shape=[1*3*120*100]
            shape = [1, self.filter_width, self.embedding_dim,
                     self.num_filter]
            print(shape)
            filter_weights = tf.get_variable(
                "idcnn_filter",
                shape=[1, self.filter_width, self.embedding_dim,
                       self.num_filter],
                initializer=self.initializer)
            # Initial (non-dilated) convolution projecting embeddings to
            # num_filter channels.
            layerInput = tf.nn.conv2d(model_inputs,
                                      filter_weights,
                                      strides=[1, 1, 1, 1],
                                      padding="SAME",
                                      name="init_layer", use_cudnn_on_gpu=True)
            self.layerInput_test = layerInput
            finalOutFromLayers = []
            totalWidthForLastDim = 0
            for j in range(self.repeat_times):
                for i in range(len(self.layers)):
                    # 1,1,2
                    dilation = self.layers[i]['dilation']
                    isLast = True if i == (len(self.layers) - 1) else False
                    # Reuse weights on every repetition after the first,
                    # so all repetitions share the same filters.
                    with tf.variable_scope("atrous-conv-layer-%d" % i,
                                           reuse=True
                                           if (reuse or j > 0) else False):
                        w = tf.get_variable(
                            "filterW",
                            shape=[1, self.filter_width, self.num_filter,
                                   self.num_filter],
                            initializer=tf.contrib.layers.xavier_initializer())
                        # Debug handles for specific repetitions/layers.
                        if j == 1 and i == 1:
                            self.w_test_1 = w
                        if j == 2 and i == 1:
                            self.w_test_2 = w
                        b = tf.get_variable("filterB", shape=[self.num_filter])
                        conv = tf.nn.atrous_conv2d(layerInput,
                                                   w,
                                                   rate=dilation,
                                                   padding="SAME")
                        self.conv_test = conv
                        conv = tf.nn.bias_add(conv, b)
                        conv = tf.nn.relu(conv)
                        # Keep the last layer's activations of each repeat.
                        if isLast:
                            finalOutFromLayers.append(conv)
                            totalWidthForLastDim += self.num_filter
                        layerInput = conv
            # Concatenate the collected outputs along the channel axis.
            finalOut = tf.concat(axis=3, values=finalOutFromLayers)
            keepProb = 1.0 if reuse else 0.5
            finalOut = tf.nn.dropout(finalOut, keepProb)
            # Drop the dummy height dim and flatten to 2-D for the
            # projection layer.
            finalOut = tf.squeeze(finalOut, [1])
            finalOut = tf.reshape(finalOut, [-1, totalWidthForLastDim])
            self.cnn_output_width = totalWidthForLastDim
            return finalOut
    def project_layer_idcnn(self, idcnn_outputs, name=None):
        """Project IDCNN features to per-tag scores (logits).

        Args:
            idcnn_outputs: float tensor [batch * num_steps, cnn_output_width].
            name: optional variable-scope name (defaults to "project").

        Returns:
            Float tensor [batch, num_steps, num_tags] of unnormalized
            tag scores.
        """
        with tf.variable_scope("project" if not name else name):
            with tf.variable_scope("logits"):
                W = tf.get_variable("W", shape=[self.cnn_output_width, self.num_tags],
                                    dtype=tf.float32, initializer=self.initializer)
                # Small positive bias init rather than zeros.
                b = tf.get_variable("b", initializer=tf.constant(0.001, shape=[self.num_tags]))
                pred = tf.nn.xw_plus_b(idcnn_outputs, W, b)
            # Restore the [batch, num_steps, num_tags] layout for the CRF.
            return tf.reshape(pred, [-1, self.num_steps, self.num_tags])
    def loss_layer(self, project_logits, lengths, name='crf_loss'):
        """CRF negative log-likelihood loss over the tag sequence.

        An artificial start tag (index ``num_tags``) is prepended to every
        sequence, so the CRF operates on ``num_tags + 1`` states; the extra
        logit column is padded with a large negative score so real steps
        can never take the start tag.

        Args:
            project_logits: float tensor [batch, num_steps, num_tags].
            lengths: int32 tensor [batch] of true sequence lengths.
            name: variable-scope name for the loss.

        Returns:
            Scalar tensor: mean negative log-likelihood over the batch.
            Side effect: creates/overwrites ``self.trans`` (transition
            matrix of shape [num_tags + 1, num_tags + 1]).
        """
        with tf.variable_scope(name):
            # Effectively -inf for the softmax/CRF scoring.
            small = -1000.0
            # Start-step logits: forbid all real tags, allow only the
            # artificial start tag (score 0 in the extra column).
            start_logits = tf.concat(
                [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]), tf.zeros(shape=[self.batch_size, 1, 1])],
                axis=-1)
            # Extra column for the start tag on every real time step,
            # filled with the forbidding score.
            pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
            logits = tf.concat([project_logits, pad_logits], axis=-1)
            logits = tf.concat([start_logits, logits], axis=1)
            # Prepend the start-tag id to the gold tag sequences.
            targets = tf.concat(
                [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1)
            self.trans = tf.get_variable(
                "transitions",
                shape=[self.num_tags + 1, self.num_tags + 1],
                initializer=self.initializer)
            # crf_log_likelihood returns the (possibly re-created)
            # transition params, which we keep for decoding.
            log_likelihood, self.trans = crf_log_likelihood(
                inputs=logits,
                tag_indices=targets,
                transition_params=self.trans,
                sequence_lengths=lengths + 1)
            return tf.reduce_mean(-log_likelihood)
    def create_feed_dict(self, batch):
        """Build the feed dict for an inference run.

        Args:
            batch: 4-tuple (strings, char_ids, seg_ids, tag_ids) as produced
                by the data pipeline; the first element is ignored here.

        Returns:
            Feed dict mapping the char/seg placeholders and a dropout keep
            probability of 1.0 (no dropout).

        NOTE(review): ``tags`` is unpacked but never fed, so this feed dict
        only supports inference — training would also need ``self.targets``
        and a real dropout value. Confirm this method is inference-only.
        """
        _, chars, segs, tags = batch
        feed_dict = {
            self.char_inputs: np.asarray(chars),
            self.seg_inputs: np.asarray(segs),
            self.dropout: 1.0,
        }
        return feed_dict
    def run_step(self, sess, batch):
        """Run one forward pass and return (lengths, logits) for *batch*.

        Args:
            sess: an active tf.Session holding the model's graph/variables.
            batch: batch tuple accepted by ``create_feed_dict``.

        Returns:
            Tuple of numpy arrays: per-example sequence lengths and the
            [batch, num_steps, num_tags] logits.
        """
        feed_dict = self.create_feed_dict(batch)
        lengths, logits = sess.run([self.lengths, self.logits], feed_dict)
        return lengths, logits
    def decode(self, logits, lengths, matrix):
        """Viterbi-decode tag paths for a batch of logit matrices.

        Mirrors the construction used in ``loss_layer``: each sequence is
        truncated to its true length, padded with a forbidding extra column
        for the artificial start tag, and prefixed with a start step.

        Args:
            logits: array-like [batch, num_steps, num_tags] tag scores.
            lengths: iterable of true sequence lengths.
            matrix: [num_tags + 1, num_tags + 1] transition matrix.

        Returns:
            List of decoded tag-id lists (start step stripped).
        """
        paths = []
        small = -1000.0
        # Start-step row: only the extra start tag (last column) is viable.
        start = np.asarray([[small] * self.num_tags + [0]])
        for score, length in zip(logits, lengths):
            score = score[:length]
            pad = small * np.ones([length, 1])
            # NOTE: this rebinding shadows the ``logits`` parameter; safe
            # because ``score`` was captured first, but easy to misread.
            logits = np.concatenate([score, pad], axis=1)
            logits = np.concatenate([start, logits], axis=0)
            path, _ = viterbi_decode(logits, matrix)
            # Drop the artificial start step from the decoded path.
            paths.append(path[1:])
        return paths
def result_to_json(self, string, tags):
item = {"string": string, "entities": []}
entity_name = ""
entity_start = 0
idx = 0
for char, tag in zip(string, tags):
if tag[0] == "S":
item["entities"].append({"word": char, "start": idx, "end": idx + 1, "type": tag[2:]})
elif tag[0] == "B":
entity_name += char
entity_start = idx
elif tag[0] == "I":
entity_name += char
elif tag[0] == "E":
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": tag[2:]})
entity_name = ""
else:
entity_name = ""
entity_start = idx
idx += 1
return item
    def evaluate_line(self, sess, inputs, id_to_tag):
        """Run NER over a single pre-encoded input and return a JSON dict.

        Args:
            sess: active tf.Session.
            inputs: batch tuple for one example; ``inputs[0][0]`` must be
                the raw text string.
            id_to_tag: mapping from tag id to tag name (e.g. "B-PER").

        Returns:
            Dict produced by ``result_to_json`` for the first (only)
            example in the batch.
        """
        # Materialize the learned CRF transition matrix for decoding.
        trans = self.trans.eval(session=sess)
        lengths, scores = self.run_step(sess, inputs)
        batch_paths = self.decode(scores, lengths, trans)
        tags = [id_to_tag[idx] for idx in batch_paths[0]]
        return self.result_to_json(inputs[0][0], tags)
class Chunk(object):
    """Inference wrapper: loads the saved IDCNN-CRF model, its config and
    vocabularies, and exposes per-line named-entity extraction."""

    def __init__(self):
        self.config_file = json.load(open("config_file", encoding="utf8"))
        self.tf_config = tf.ConfigProto()
        self.sess = tf.Session(config=self.tf_config)
        self.sess.run(tf.global_variables_initializer())
        self.maps = "maps.pkl"
        # BUG FIX: ``pickle.load`` has no ``protocol`` keyword (that option
        # belongs to ``pickle.dump``), so the old non-Python-3 branch raised
        # TypeError.  That branch was unreachable anyway: the
        # ``open(..., encoding=...)`` call above is Python-3-only syntax.
        # One plain call covers both cases.
        self.char_to_id, self.id_to_char, self.tag_to_id, self.id_to_tag = \
            pickle.load(open(self.maps, "rb"))
        self.model = Model(self.config_file)
        self.ckpt = tf.train.get_checkpoint_state("ckpt")
        if self.ckpt and tf.train.checkpoint_exists(self.ckpt.model_checkpoint_path):
            print("Reading model parameters from %s" % self.ckpt.model_checkpoint_path)
            self.model.saver.restore(self.sess, self.ckpt.model_checkpoint_path)
        else:
            print("No model file")

    def features(self, string):
        """Per-character segmentation features from jieba word boundaries:
        0 = single-char word, 1 = word begin, 2 = word middle, 3 = word end.
        """
        def _word_to_flags(word):
            # One flag per character of the segmented word.
            length = len(word)
            if length == 1:
                return [0]
            flags = [2] * length
            flags[0] = 1
            flags[-1] = 3
            return flags
        return list(chain.from_iterable(
            [_word_to_flags(word) for word in jieba.cut(string) if len(word.strip()) > 0]))

    def get_text_input(self, text):
        """Run NER over *text*.

        Returns the ``result_to_json``-style dict, or None when the
        stripped text is shorter than two characters (too short to tag).
        """
        inputs = list()
        inputs.append([text])
        unk_id = self.char_to_id["<UNK>"]
        # ``.get`` instead of ``.setdefault``: same lookup result without
        # silently inserting every unseen character into the vocabulary.
        inputs.append([[self.char_to_id.get(char, unk_id)
                        for char in text if len(char.strip()) > 0]])
        inputs.append([self.features(text)])
        inputs.append([[]])
        if len(text.strip()) > 1:
            return self.model.evaluate_line(self.sess, inputs, self.id_to_tag)
if __name__ == "__main__":
    # Smoke test: tag every line of text.txt and print the JSON result
    # (None is printed for lines shorter than two characters).
    c = Chunk()
    for line in open('text.txt', 'r', encoding='utf8'):
        print(c.get_text_input(line.strip()))
# s="典型胸痛 因体力活动、情绪激动等诱发,突感心前区疼痛,多为发作性绞痛或压榨痛,也可为憋闷感。疼痛从胸骨后或心前区开始,向上放射至左肩、臂,甚至小指和无名指,休息或含服硝酸甘油可缓解。胸痛放散的部位也可涉及颈部、下颌、牙齿、腹部等。胸痛也可出现在安静状态下或夜间,由冠脉痉挛所致,也称变异型心绞痛。如胸痛性质发生变化,如新近出现的进行性胸痛,痛阈逐步下降,以至稍事体力活动或情绪激动甚至休息或熟睡时亦可发作。疼痛逐渐加剧、变频,持续时间延长,祛除诱因或含服硝酸甘油不能缓解,此时往往怀疑不稳定心绞痛。"
# print(c.get_text_input(s))
| [
"lsvt@lsvtdeiMac.local"
] | lsvt@lsvtdeiMac.local |
9be50d39d015e172e51c97d330d5fe5035965ef5 | b8e3363a40bc9928ae85c16232c5bf6240597a18 | /out/production/home-assistant/components/switch/tellduslive.py | 7edab40054f51d8807a01fa0c066ed3cb09c138f | [
"MIT"
] | permissive | LaurentTrk/home-assistant | 4cbffd5a71f914e003918542319bc6caa96dbb72 | 5a808d4e7df4d8d0f12cc5b7e6cff0ddf42b1d40 | refs/heads/dev | 2021-01-15T23:02:38.147063 | 2016-05-15T12:21:52 | 2016-05-15T12:21:52 | 51,471,180 | 2 | 0 | null | 2016-02-10T20:49:47 | 2016-02-10T20:49:47 | null | UTF-8 | Python | false | false | 1,832 | py | """
homeassistant.components.switch.tellduslive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Tellstick switches using Tellstick Net and
the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.tellduslive/
"""
import logging
from homeassistant.components import tellduslive
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up Telldus Live switches from the discovery information.

    Does nothing when no discovery info was provided; otherwise registers
    one TelldusLiveSwitch per discovered switch id.
    """
    if discovery_info is not None:
        add_devices(TelldusLiveSwitch(switch_id)
                    for switch_id in discovery_info)
class TelldusLiveSwitch(ToggleEntity):
    """ Represents a Tellstick switch exposed through the Telldus Live
    online service; state is read from the shared tellduslive.NETWORK. """
    def __init__(self, switch_id):
        # Telldus Live id used for all NETWORK lookups/commands.
        self._id = switch_id
        self.update()
        _LOGGER.debug("created switch %s", self)
    def update(self):
        # Refresh the shared switch cache, then re-read this switch's data.
        tellduslive.NETWORK.update_switches()
        self._switch = tellduslive.NETWORK.get_switch(self._id)
    @property
    def should_poll(self):
        """ Tells Home Assistant to poll this entity. """
        return True
    @property
    def name(self):
        """ Returns the name of the switch if any. """
        return self._switch["name"]
    @property
    def available(self):
        # Available unless the service flagged the device as offline.
        return not self._switch.get("offline", False)
    @property
    def is_on(self):
        """ True if switch is on. """
        # Local import: presumably defers the tellive dependency until the
        # state is actually queried — confirm before hoisting to the top.
        from tellive.live import const
        return self._switch["state"] == const.TELLSTICK_TURNON
    def turn_on(self, **kwargs):
        """ Turns the switch on. """
        tellduslive.NETWORK.turn_switch_on(self._id)
    def turn_off(self, **kwargs):
        """ Turns the switch off. """
        tellduslive.NETWORK.turn_switch_off(self._id)
| [
"laurent.turek_github@gadz.org"
] | laurent.turek_github@gadz.org |
e72b7714ac7c6c70fcad4cd97133be4a97489a94 | 72a934f4940c4ae77682d45a2d1e8ec5b1e2ff01 | /pro/models/sequential.py | 18d7368dc2b1336ad3c0ea4e895f26cf5c057335 | [] | no_license | dhaval-jain/g1 | 5347160fcf4efc21207fdf9f996a10dd4e0f61e9 | a6d4deb672204b9eaf1efc5c6e0c12f38b5bb906 | refs/heads/master | 2023-03-18T22:49:03.559327 | 2020-11-17T16:59:50 | 2020-11-17T16:59:50 | 346,329,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | py | TRAIN_PATH = "C:/Users/meow/PycharmProjects/project1/CovidDataset/Train" # gets the the paths in that folder
VAL_PATH = "C:/Users/meow/PycharmProjects/project1/CovidDataset/Val"

import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.layers import *
from keras.models import *
from keras.preprocessing import image
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Conv3D

# Small CNN for binary COVID / non-COVID chest X-ray classification:
# three conv/pool stages followed by a dense head with one sigmoid unit.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(224, 224, 3)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss=keras.losses.binary_crossentropy, optimizer='adam', metrics=['accuracy'])
model.summary()

# Use the Image Data Generator to import the images from the dataset,
# with light augmentation on the training split only.
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Make sure you provide the same target size as initialized for the image size.
# BUG FIX: class_mode was 'categorical', which yields one-hot label vectors
# and is incompatible with the single sigmoid output trained with
# binary_crossentropy above; 'binary' yields the required 0/1 labels.
training_set = train_datagen.flow_from_directory('C:/Users/meow/PycharmProjects/project1/CovidDataset/Train',
                                                 target_size=(224, 224),
                                                 batch_size=16,
                                                 class_mode='binary')
training_set.class_indices  # label -> index mapping (inspect interactively)
test_set = test_datagen.flow_from_directory('C:/Users/meow/PycharmProjects/project1/CovidDataset/Val',
                                            target_size=(224, 224),
                                            batch_size=16,
                                            class_mode='binary')

r = model.fit_generator(
    training_set,
    validation_data=test_set,
    epochs=20,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)

# save it as a h5 file
import tensorflow as tf
from keras.models import load_model
model.save('model_sequential_14.h5')
model_json = model.to_json()
with open('model_adam_sequential_2020.json', 'w') as json_file:
    json_file.write(model_json)
print('Model saved to the disk.')

# Plot the loss curves.
# BUG FIX: savefig() must run before show() — once the window shown by
# show() is closed the figure is empty and savefig() writes a blank file.
# Each chart also gets its own figure so the curves don't pile up.
plt.figure()
plt.plot(r.history['loss'], label='train_loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.savefig('LossVal_loss')
plt.show()

# plot the accuracy
plt.figure()
plt.plot(r.history['accuracy'], label='train_accuracy')
plt.plot(r.history['val_accuracy'], label='val_accuracy')
plt.legend()
plt.savefig('AccVal_accuracy')
plt.show()
| [
"inexorable619@gmail.com"
] | inexorable619@gmail.com |
8eb5717f4d3e6a853f98ec128fe586d28b3c4c9f | 0e29d70a54fa89cb7cb468529c8601d0ddf7b1b5 | /date_migrate.py | 5d78960a9634fe1cc69bd81af1c79ded42e0293e | [] | no_license | BishopJustice/MultiPage | f715d845137ed844d789b705a783b996ddb8f5a8 | fbd3858790485abfb8120618cd936fd94e61d61d | refs/heads/master | 2020-07-03T19:18:09.147296 | 2016-10-24T23:26:20 | 2016-10-24T23:26:20 | 66,682,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | from app.models import User, Item
from app import db
import datetime
from dateutil import parser

# One-off migration: convert string timestamp columns to real datetimes.

# users = db.session.query(User).all()
# for each in users:
#     if each.joined:
#         each.joined = datetime.datetime.strptime(each.joined, '%Y/%m/%d %H:%M:%S')

items = db.session.query(Item).all()
# for each in items:
#     print each.opened_at.date()
for each in items:
    if each.opened_at:
        each.opened_at = parser.parse(each.opened_at)
        # BUG FIX: was ``db.session.add(each.opened_at)`` — session.add()
        # expects the mapped model instance, not the datetime value.
        db.session.add(each)
        print(type(each.opened_at))
    if each.resolved_at:
        # resolved_at conversion left disabled, as in the original run.
        # each.resolved_at = parser.parse(each.resolved_at)
        # db.session.add(each)
        print(type(each.resolved_at))
db.session.commit()
print("Done!")
"luke@lyft.com"
] | luke@lyft.com |
58df714142bc8b34b29e30a57f33a9a9cdc9faf6 | 6ca4a9f5483c754d12cecca3263bdf798a1d3447 | /src/cleaning.py | ab72ec613cfb0b06fb3856da74de64357bba8b6c | [] | no_license | Esaslow/ChurnCaseStudy | 712c37ab13a0c2a9cc2ba1071a5d48d2db665376 | f510f64525ad1b20584e630773376bd233ce96f6 | refs/heads/master | 2021-04-18T21:30:01.159227 | 2018-04-04T05:50:38 | 2018-04-04T05:50:38 | 126,740,944 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,312 | py | import pandas as pd
import numpy as np
from datetime import timedelta
import matplotlib.pyplot as plt
from importlib import reload
from src import cleaning as C
from sklearn.preprocessing import StandardScaler
def clean_city(df):
    """One-hot and ordinally encode the 'city' column.

    Adds dummy columns 'kings_landing' / 'winterfell' (Astapor is the
    dropped baseline) and an integer 'city_categories' column
    (Astapor=1, Winterfell=2, King's Landing=3), while keeping the
    original 'city' column intact.  Returns a new DataFrame; *df* is
    not modified.  Note the index is converted to strings by rename().
    """
    out = df.copy()
    # Work on a throwaway duplicate of 'city' so the original column
    # survives pd.get_dummies (which consumes the encoded column).
    out['raw_city'] = out['city']
    out = pd.get_dummies(out, columns=['raw_city'], drop_first=True)
    out = out.rename(
        index=str,
        columns={"raw_city_King's Landing": "kings_landing",
                 "raw_city_Winterfell": "winterfell"})
    city_codes = {'Astapor': 1,
                  'Winterfell': 2,
                  "King's Landing": 3}
    out['city_categories'] = out['city'].map(city_codes)
    return out
def add_target(df):
    """Label churn relative to the most recent trip in the frame.

    Adds two columns to a copy of *df* (which must contain a datetime
    column 'last_trip_date'):

    * within_last_30 -- 1 when the user's last trip was more than 30 days
      before the newest trip in the data (CHURN), else 0 (NO CHURN).
    * within_last_60 -- same flag, escalated to 2 when the gap exceeds
      60 days.

    Returns the labelled copy; *df* itself is untouched.
    """
    labelled = df.copy()
    newest = max(labelled['last_trip_date'])
    gap = newest - labelled['last_trip_date']
    labelled['within_last_30'] = (gap > timedelta(days=30)) * 1
    labelled['within_last_60'] = labelled['within_last_30']
    # Escalate the flag for users idle for more than two months.
    labelled.loc[gap > timedelta(days=60), 'within_last_60'] = 2
    return labelled
def read_data(file_path):
    """Load the rides CSV with its date columns parsed to datetimes.

    Parameters
    ----------
    file_path : str
        Location of the CSV relative to the current directory.

    Returns
    -------
    pandas.DataFrame
        The file's contents with 'last_trip_date' and 'signup_date'
        converted to datetime64 columns.
    """
    date_columns = ['last_trip_date', 'signup_date']
    return pd.read_csv(file_path, parse_dates=date_columns)
def clean_rtg_of_driver(df):
    """Impute 'avg_rating_of_driver' four different ways.

    Adds to a copy of *df*:
      * avg_rating_of_driver_median     -- NaNs replaced by the median
      * avg_rating_of_driver_mode       -- NaNs replaced by the mode
      * avg_rating_of_driver_mean       -- NaNs replaced by the mean
      * avg_rating_of_driver_normalized -- z-scored version of the
        median-imputed column (StandardScaler)
    Returns the augmented copy; *df* is untouched.
    """
    out = df.copy()
    ratings = out.avg_rating_of_driver
    out['avg_rating_of_driver_median'] = ratings.fillna(ratings.median())
    out['avg_rating_of_driver_mode'] = ratings.fillna(ratings.mode()[0])
    out['avg_rating_of_driver_mean'] = ratings.fillna(ratings.mean())
    # z-score the median-imputed column; fit_transform needs a 2-D array.
    n_rows = out['avg_rating_of_driver_median'].shape[0]
    scaler = StandardScaler()
    out['avg_rating_of_driver_normalized'] = (
        scaler.fit_transform(
            out['avg_rating_of_driver_median'].values.reshape(n_rows, 1)))
    return out
def cleaning_avg_rating_by_driver(df):
    """Impute and normalize the 'avg_rating_by_driver' column.

    Adds to a copy of *df*:
      * rating_by_driver_median            -- NaNs replaced by the median
      * rating_by_driver_median_normalized -- z-scored version of that
        column (StandardScaler)
    Returns the augmented copy; *df* is untouched.
    """
    out = df.copy()
    ratings = out['avg_rating_by_driver']
    # Median imputation for the missing ratings.
    out['rating_by_driver_median'] = ratings.fillna(ratings.median())
    # z-score the imputed column; fit_transform needs a 2-D array.
    n_rows = out['rating_by_driver_median'].shape[0]
    scaler = StandardScaler()
    out['rating_by_driver_median_normalized'] = scaler.fit_transform(
        out['rating_by_driver_median'].values.reshape(n_rows, 1))
    return out
def clean_luxury_user(df):
    """Add 'num_Luxury_User', a 0/1 integer version of the boolean
    'luxury_car_user' column.  Returns a copy; *df* is untouched."""
    out = df.copy()
    out['num_Luxury_User'] = out['luxury_car_user'] * 1
    return out
def remove_july(df):
    """Drop every row whose 'last_trip_date' falls in July (any year).
    Returns a filtered copy; *df* is untouched."""
    trips = df.copy()
    not_july = trips['last_trip_date'].dt.month != 7
    return trips.loc[not_july, :]
def plot_(df, target, ax):
    """Draw four churn/no-churn comparison histograms onto *ax*.

    Panels: trips in first 30 days, weekday percent, surge percent and
    average distance, each overlaying the no-churn (target == 0) and
    30-day-churn (target == 1) distributions.

    Args:
        df: frame with columns trips_in_first_30_days, weekday_pct,
            surge_pct and avg_dist.
        target: boolean/int series aligned with *df* (1 = churned).
        ax: indexable collection of at least four matplotlib Axes.

    Returns:
        The same *ax* collection, for chaining.
    """
    # (index, series, bins or None, xlim, xlabel, title) for each panel;
    # the four original copy-pasted blocks collapsed into one loop.
    panels = [
        (0, df.trips_in_first_30_days, list(np.linspace(0, 20, 50)),
         [-1, 20], 'Number of rides in the First 30 days',
         'Number of rides in First 30 days hist'),
        (1, df.weekday_pct, None, [-1, 110], 'Week day Percent',
         'Weekday Percent hist'),
        (2, df.surge_pct, None, [-1, 110], 'Surge Percent',
         'Surge Percent hist'),
        (3, df.avg_dist, list(np.linspace(0, 60, 40)), [-1, 40],
         'Avg Distance', 'Average Distance hist'),
    ]
    for idx, series, bins, xlim, xlabel, title in panels:
        # normed=1 kept for compatibility with the matplotlib version in
        # use here (renamed to `density` in newer releases).
        hist_kwargs = {'alpha': .6, 'normed': 1}
        if bins is not None:
            hist_kwargs['bins'] = bins
        ax[idx].hist(series[target == 0], label='no churn', **hist_kwargs)
        ax[idx].hist(series[target == 1], label='30 day churn', **hist_kwargs)
        ax[idx].set_xlim(xlim)
        ax[idx].legend()
        ax[idx].set_xlabel(xlabel)
        ax[idx].set_title(title)
        ax[idx].grid(alpha=.2, color='r', linestyle='--')
    # Only the first panel carries the shared y-axis label.
    ax[0].set_ylabel('Normalized Count')
    return ax
| [
"Elsa7762@colorado.edu"
] | Elsa7762@colorado.edu |
d487e6f13e0f9607074d24d0dcca3b4571ee9366 | 00fac941f4f9e39cda9e3286d5cc1a77bda1d888 | /GetCode.py | 39bd6b4ba3989967c5cbd3a4f5254a2935a566c6 | [] | no_license | bluegray/Sublime-Text-3-config | feb340139f01dd35e63874cb26c5e6515b3368bc | 73428ebd263c60472cb7de64028c559252ed8c5e | refs/heads/master | 2020-04-10T14:00:33.960991 | 2015-05-16T12:06:12 | 2015-05-16T12:06:30 | 33,752,869 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | import sublime
import sublime_plugin
class GetSelectionCodeCommand(sublime_plugin.TextCommand):
    """Sublime Text command: show (in the status bar) and copy (to the
    clipboard) the code point of the character at the start of the first
    selection, in decimal and hex."""

    def run(self, edit):
        regions = self.view.sel()
        if not regions:
            return
        code_point = ord(self.view.substr(regions[0].begin()))
        msg = "%d 0x%x" % (code_point, code_point)
        self.view.set_status("Char Code", "Char Code: " + msg + " :: ")
        sublime.set_clipboard(msg)
| [
"bluegray@users.noreply.github.com"
] | bluegray@users.noreply.github.com |
ea646c020a58d6eb93ac0b637c7b9fdd4b435641 | addb7882a96d30c431b40ba33309cbf8f8328894 | /taskgraph/tests/model.py | 26500f3cfe82c33d90c54b6c0e55e7a9e544f7e4 | [] | no_license | mmuddy/taskgraph | 581de61669f6ce56a87c487390a4b2ee3bbfb9ac | 0925dd2f8fd7c83b70a830cc5903366bf2c830ba | refs/heads/master | 2020-04-06T20:02:26.896467 | 2017-01-23T22:22:38 | 2017-01-23T22:22:38 | 68,546,946 | 0 | 0 | null | 2016-12-23T05:27:31 | 2016-09-18T20:58:23 | HTML | UTF-8 | Python | false | false | 6,519 | py | from taskgraph.model.model import *
from taskgraph.tasktracker.getinterface import get_interface
from .settings import tracker_dummy, tracker_redmine
from django.test import TestCase
from django.db import IntegrityError
class TestTracker(TestCase):
    """Basic CRUD and uniqueness checks for the Tracker model."""
    # NOTE(review): not prefixed with ``test_``, so the test runner never
    # collects this method automatically -- confirm whether that is
    # intentional (helper) or an oversight.
    def create_tracker(self):
        # Create, fetch back, compare, delete, and verify the table count
        # is back where it started.
        all_trackers = Tracker.objects.all()
        tracker = Tracker.objects.create(url='no-validation', type='no-validation')
        from_get_tracker = Tracker.objects.get(url='no-validation')
        self.assertTrue(tracker and from_get_tracker)
        self.assertEqual(tracker, from_get_tracker)
        from_get_tracker.delete()
        all_trackers_after_delete = Tracker.objects.all()
        self.assertEqual(len(all_trackers), len(all_trackers_after_delete))
    def test_unique(self):
        """Creating a second tracker with the same url/type must raise
        IntegrityError (unique constraint)."""
        Tracker.objects.get_or_create(url='no-validation', type='no-validation')
        try:
            Tracker.objects.create(url='no-validation', type='no-validation')
        except IntegrityError:
            # Expected: duplicate rejected by the database.
            return
        self.assertTrue(False)
def assert_creation(test_case, models_before):
    """Assert every listed model gained at least one row.

    models_before -- iterable of (model_class, row_count_before) pairs
    captured before the operation under test ran.
    """
    for model_cls, count_before in models_before:
        current = model_cls.objects.all().count()
        test_case.assertTrue(current - count_before > 0)
def assert_cleanup(test_case, models_before):
    """Assert every listed model is back to its original row count, i.e.
    everything created during the test was cleaned up.

    models_before -- iterable of (model_class, row_count_before) pairs.
    """
    for model_type, objects_before in models_before:
        # BUG FIX: was the Python-2-only statement ``print model_type``
        # (a SyntaxError on Python 3); the call form works on both.
        print(model_type)
        test_case.assertTrue(model_type.objects.all().count() - objects_before == 0)
def test_projects_creation_and_cleanup(test_case, tracker):
    """Shared scenario: restoring a tracker's project list must create rows
    for every project-related model; deleting the tracker ends the test.
    (The cleanup assertion is currently disabled -- see the commented call.)
    """
    type_list = [Project, Assignee, TaskState, TaskRelationType, TaskCategory]
    # Snapshot per-model row counts before the restore.
    models_before = []
    for model_type in type_list:
        models_before.append((model_type, model_type.objects.all().count()))
    tracker.restore_project_list(get_interface(tracker.type))
    assert_creation(test_case, models_before)
    tracker.delete()
    # assert_cleanup(test_case, models_before)
def test_create_and_clean_up_tasks(test_case, tracker):
    """Shared scenario: restoring a tracker's tasks must create Task and
    TaskRelation rows.  All projects are activated first so every project's
    tasks are fetched.  (Post-delete cleanup assertions are disabled.)
    """
    i_tracker = get_interface(tracker.type).connect(tracker)
    i_tracker.refresh()
    tracker.restore_project_list(i_tracker)
    # Snapshot row counts for the task-related models before restoring.
    list_before = []
    task_count = Task.objects.all().count()
    rel_count = TaskRelation.objects.all().count()
    list_before.append((Task, task_count))
    list_before.append((TaskRelation, rel_count))
    for project in tracker.projects:
        project.is_active = True
        project.save()
    tracker.restore_project_tasks(i_tracker, only_active=False)
    for model_type, before_count in list_before:
        test_case.assertTrue(model_type.objects.all().count() - before_count > 0)
    tracker.delete()
    #for model_type, before_count in list_before:
    #    test_case.assertTrue(model_type.objects.all().count() - before_count == 0)
# Concrete suites binding the two shared scenarios above to each backend.
class TestTrackerWithDummy(TestCase):
    def test_projects_creation_and_cleanup(self):
        test_projects_creation_and_cleanup(self, tracker_dummy())
class TestTrackerWithRedmine(TestCase):
    def test_projects_creation_and_cleanup(self):
        test_projects_creation_and_cleanup(self, tracker_redmine())
# NOTE(review): the two TestProject* classes run the *task* scenario but
# keep the "projects_creation_and_cleanup" method name -- confirm whether
# the method name is a copy-paste leftover.
class TestProjectWithDummy(TestCase):
    def test_projects_creation_and_cleanup(self):
        test_create_and_clean_up_tasks(self, tracker_dummy())
class TestProjectWithRedmine(TestCase):
    def test_projects_creation_and_cleanup(self):
        test_create_and_clean_up_tasks(self, tracker_redmine())
class TestIntegrationWithRedmine(TestCase):
    """End-to-end tests against a live Redmine 'Pytift test' project.

    NOTE(review): the ``filter(...)[0]`` pattern used throughout indexes a
    filter object, which only works on Python 2 (where filter returns a
    list) -- confirm the intended runtime, or wrap in list() for Python 3.
    """
    def test_task_update(self):
        """Append a marker to each UnitTest task's subject on the tracker,
        re-fetch, then strip the marker again to restore the state."""
        tracker = tracker_redmine()
        tracker.save()
        i_tracker = get_interface(tracker.type)
        i_tracker.connect(tracker)
        i_tracker.refresh()
        tracker.restore_project_list(get_interface(tracker.type))
        # Activate only the dedicated test project.
        pytiff = None
        for project in tracker.projects:
            if project.name == 'Pytift test':
                project.is_active = True
                project.save()
                pytiff = project
                break
        tracker.restore_project_tasks(get_interface(tracker.type))
        # Round 1: append the marker and push to the tracker.
        for task in filter(lambda t: t.category.name == 'UnitTest', pytiff.tasks):
            subj_field = filter(lambda f: f.name == 'subject', task.additional_field)[0]
            subj_field.char += '$ test passed'
            subj_field.save()
            task.save(save_on_tracker=True, i_tracker=i_tracker)
        # Re-fetch from the tracker; objects are recreated, so re-resolve.
        tracker.restore_project_tasks(get_interface(tracker.type))
        pytiff = filter(lambda p: p.name == 'Pytift test', tracker.projects)[0]
        # Round 2: strip the marker again (restore remote state).
        for task in filter(lambda t: t.category.name == 'UnitTest', pytiff.tasks):
            subj_field = filter(lambda f: f.name == 'subject', task.additional_field)[0]
            subj_field.char = subj_field.char.split('$')[0]
            subj_field.save()
            task.save(save_on_tracker=True, i_tracker=i_tracker)
    def test_relation_update(self):
        """Delete one task relation on the tracker, verify the count drops
        and survives a re-fetch, then recreate the relation to restore."""
        tracker = tracker_redmine()
        tracker.save()
        i_tracker = get_interface(tracker.type)
        i_tracker.connect(tracker)
        i_tracker.refresh()
        tracker.restore_project_list(get_interface(tracker.type))
        pytiff = None
        for project in tracker.projects:
            if project.name == 'Pytift test':
                project.is_active = True
                project.save()
                pytiff = project
                break
        tracker.restore_project_tasks(get_interface(tracker.type))
        # Remember the first relation's endpoints/type, then delete it.
        t_from = None
        t_to = None
        t_type = None
        old_count = len(pytiff.tasks_relations)
        for relation in pytiff.tasks_relations:
            t_from = relation.from_task
            t_to = relation.to_task
            t_type = relation.type
            relation.delete(i_tracker=i_tracker)
            break
        self.assertTrue(t_from and t_to and t_type)
        self.assertEqual(len(pytiff.tasks_relations), old_count - 1)
        # The deletion must persist through a full re-fetch.
        tracker.restore_project_tasks(get_interface(tracker.type))
        pytiff = filter(lambda p: p.name == 'Pytift test', tracker.projects)[0]
        self.assertEqual(len(pytiff.tasks_relations), old_count - 1)
        # Re-resolve the endpoints against the freshly loaded objects and
        # recreate the relation to restore the original state.
        t_type = filter(lambda p: p.name == t_type.name, pytiff.task_relation_types)[0]
        t_from = filter(lambda p: p.identifier == t_from.identifier, pytiff.tasks)[0]
        t_to = filter(lambda p: p.identifier == t_to.identifier, pytiff.tasks)[0]
        old_rel = TaskRelation.objects.create(project=pytiff, type=t_type, from_task=t_from, to_task=t_to)
        old_rel.save(i_tracker=i_tracker)
        self.assertEqual(len(pytiff.tasks_relations), old_count)
| [
"yakovlevvladyakovlev@yandex.ru"
] | yakovlevvladyakovlev@yandex.ru |
6f2edb09e5c1f151145ab5c1adacec423009c475 | e452f89c51180487f2ed68c33ca2fed54e14a967 | /1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/03_Conditional-Statements-Advanced/01.Lab-04-Personal-Titles.py | 72a970d05c0e96713bf60476264312a5d9ccd0bc | [
"MIT"
] | permissive | karolinanikolova/SoftUni-Software-Engineering | c996f18eea9fb93164ab674614e90b357ef4858a | 7891924956598b11a1e30e2c220457c85c40f064 | refs/heads/main | 2023-06-21T23:24:55.224528 | 2021-07-22T16:15:59 | 2021-07-22T16:15:59 | 367,432,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # 4. Обръщение според възраст и пол
# Да се напише конзолна програма, която прочита възраст (реално число) и пол ('m' или 'f'), въведени от потребителя, и отпечатва обръщение измежду следните:
# • "Mr." – мъж (пол 'm') на 16 или повече години
# • "Master" – момче (пол 'm') под 16 години
# • "Ms." – жена (пол 'f') на 16 или повече години
# • "Miss" – момиче (пол 'f') под 16 години
age = float(input())
sex = input()
if sex == 'f':
if age >= 16:
print('Ms.')
elif age < 16:
print('Miss')
elif sex == 'm':
if age >= 16:
print('Mr.')
elif age < 16:
print('Master')
| [
"Nikolova@eum.root.eumetsat.int"
] | Nikolova@eum.root.eumetsat.int |
9d32317d1286c1736e8582adf02d5839dba92f00 | 9246f53f8048e2040f6c40b12fd6e81bf11bce1b | /chapter10/kmeans_sklearn.py | acdaa10443f1f5da396d3791aea808f2b6ff816b | [
"MIT"
] | permissive | damonclifford/Python-Machine-Learning-By-Example-Third-Edition | 3541afefde8de164c3c82a47441f7fb20bbd7f71 | 35f364fd9f7f044771fb750bddf4b6fb101ea89e | refs/heads/master | 2022-12-12T02:50:18.503257 | 2020-09-02T05:48:14 | 2020-09-02T05:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | '''
Source codes for Python Machine Learning By Example 3rd Edition (Packt Publishing)
Chapter 10 Discovering Underlying Topics in the Newsgroups Dataset with Clustering and Topic Modeling
Author: Yuxi (Hayden) Liu (yuxi.liu.ece@gmail.com)
'''
from sklearn import datasets
# Use only the petal length/width features of the iris data.
iris = datasets.load_iris()
X = iris.data[:, 2:4]
y = iris.target  # true labels; loaded but unused in this clustering demo
import numpy as np
from matplotlib import pyplot as plt
k = 3
from sklearn.cluster import KMeans
# Fixed random_state keeps the cluster assignment reproducible.
kmeans_sk = KMeans(n_clusters=3, random_state=42)
kmeans_sk.fit(X)
clusters_sk = kmeans_sk.labels_
centroids_sk = kmeans_sk.cluster_centers_
# Scatter each cluster in its own color, then mark the centroids.
for i in range(k):
    cluster_i = np.where(clusters_sk == i)
    plt.scatter(X[cluster_i, 0], X[cluster_i, 1])
plt.scatter(centroids_sk[:, 0], centroids_sk[:, 1], marker='*', s=200, c='#050505')
plt.show()
| [
"yuxi.liu.ece@gmail.com"
] | yuxi.liu.ece@gmail.com |
013916367cfd1dfcd2bbaf32bb98f24b7cbf6c17 | 273c436a67c50e0128e9f7c181f6a18891b9bac9 | /ModuleWeatherBundle/Resource/WeatherResourceImpl/Cached.py | da5ff27d7bd66cfeb5b8271e13095684a4cf19c5 | [] | no_license | jaepyoung/weather-microservice | 302c52cad82dcb7248a2b1025449bca308e5ef6f | 6818e9ae96817f3e8708b654a7922554441db393 | refs/heads/master | 2021-01-19T13:36:05.398880 | 2015-01-19T08:33:09 | 2015-01-19T08:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | from ModuleWeatherBundle.Resource.WeatherResource import WeatherResource
import json
class Cached(WeatherResource):
    """Caching proxy around another WeatherResource.

    Looks up results in Redis first; on a miss, delegates to the wrapped
    service and stores the JSON-encoded result with a TTL.
    """
    def __init__(self, service, cache_redis, cache_ttl=3600):
        """service -- the WeatherResource to delegate cache misses to.
        cache_redis -- Redis-like client with get(key)/set(key, val, ttl).
        cache_ttl -- cache entry lifetime in seconds (default one hour).
        """
        super(Cached, self).__init__()
        self.service = service
        self.cache_redis = cache_redis
        self.cache_ttl = cache_ttl

    def getWeatherConditions(self, region, city):
        """Return current conditions for (region, city), cached."""
        key_params = {
            'method': 'conditions',
            'region': region,
            'city': city,
        }
        # BUG FIX: sort_keys=True makes the cache key deterministic.
        # Without it, dict iteration order (and therefore the key string)
        # could differ between processes on pre-3.7 Pythons with hash
        # randomization, silently defeating the cache.
        key = 'wunderground_' + json.dumps(
            key_params, separators=(',', ':'), sort_keys=True)
        data = self.cache_redis.get(key)
        if data is not None:
            return json.loads(data)
        data = self.service.getWeatherConditions(region, city)
        self.cache_redis.set(
            key,
            json.dumps(data),
            self.cache_ttl
        )
        return data
| [
"athlan@vgroup.pl"
] | athlan@vgroup.pl |
63cd09ad5e4f6c73fabb07766215cf1ea10619ac | 760a806cf48d62f96c32906f2cb2be861ab4eda2 | /venv/bin/python-config | 90275ad6c7c13e606e7fdb5fe45939227bd636f2 | [
"MIT"
] | permissive | francamacdowell/AnalyzeYou | e6339104181012ef196e0ce5d7c537efa21dd1c2 | 3fa6556b621af99543693fc26fa0d784996bd19c | refs/heads/master | 2021-02-23T14:35:41.925987 | 2020-03-06T11:39:30 | 2020-03-06T11:39:30 | 245,402,115 | 0 | 0 | MIT | 2020-03-06T11:33:13 | 2020-03-06T11:26:52 | Python | UTF-8 | Python | false | false | 2,359 | #!/home/macdowell/Workspace/AnalyzeYou/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Print a usage line listing all options to stderr and exit."""
    option_list = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], option_list))
    sys.exit(code)
# Parse long options only; any unknown flag or a bare invocation prints usage.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
# getopt returns (flag, value) pairs; only the flags matter here.
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit one line of build/config information per requested flag.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # EXT_SUFFIX replaced the legacy 'SO' config var; fall back for old builds
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"fmdss@ic.ufal.br"
] | fmdss@ic.ufal.br | |
04a6fcd8590335f274756db0927a0e81091f1511 | e4039781ee08c03d32cc5bdfa1d424a4a4f33ac0 | /BO_parser_prot.py | d54edca1049f6dca1df659dea22a86288773161e | [] | no_license | CoderMatthias/ortholog_pipeline | c003a03af94392eb8ce3cab4fc551630d595d456 | 7828d88700004f6dc61fde0c565d48f7c88e2d34 | refs/heads/master | 2021-01-19T12:39:23.897794 | 2015-07-15T19:49:29 | 2015-07-15T19:49:29 | 39,141,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | #!/usr/bin/python3
import sys
import time
'''
This python script takes the blastp tab-deliminated data and parses it to include unique hits and an additive bit score
Requires: output from blastp, a dictionary to convert FBpp#s to FBgn#s for both Dmen and other D species
Usage: python BO_parser_prot.py blastp_output.tsv Dmel_FBpp_to_FBgn.tsv species_FBpp_to_FBgn.tsv
'''
start_time = time.time()
def line_list_from_input (sys_argv):
    '''Open a file and make a line list with it's contents

    Returns (list-of-records, file name); each record is the tab-split
    fields of one non-comment, non-blank line (see make_list_into_LoL).
    '''
    with open(sys_argv, 'r') as source_file:
        line_list = source_file.read().split('\n')
        source_file_name = source_file.name
    LoL = make_list_into_LoL(line_list)
    return LoL , source_file_name
def make_list_into_LoL(line_list):
    '''Take a line list and make a list of list (LoL).

    Comment lines (starting with '#') and blank/whitespace-only lines
    are dropped; every other line is split on tabs.
    '''
    return [line.split('\t')
            for line in line_list
            if not line.startswith('#') and line.strip() != '']
def FBpp_to_FBgn_dict(dict_sys_argv):
    '''Converts the FBpp <-> FBgn file and to a dictionary.

    Column 0 (FBpp id) becomes the key, column 1 (FBgn id) the value.
    '''
    rows, _ = line_list_from_input(dict_sys_argv)
    return {row[0]: row[1] for row in rows}
def replace_FBpp_w_FBgn(pp_to_gn_dict, list_to_switch, column_to_switch):
    '''Replaces protein number (FBpp) with the gene number (FBgn).

    Mutates the records in place and returns the same list for chaining.
    '''
    for row in list_to_switch:
        protein_id = row[column_to_switch]
        row[column_to_switch] = pp_to_gn_dict[protein_id]
    return list_to_switch
def column_value_unique_list(line_list, column_number):
    '''Make list of all unique items in column of list.

    First-occurrence order is preserved.
    '''
    seen = []
    for record in line_list:
        value = record[column_number]
        if value not in seen:
            seen.append(value)
    return seen
def make_blast_dict(blast_subset):
    '''make a dictionary of blast results where key = gene and value contains bitscore

    Maps each hit gene (column 1) to the list of its bit scores
    (last column, converted to float), in input order.
    '''
    scores_by_gene = {}
    for record in blast_subset:
        scores_by_gene.setdefault(record[1], []).append(float(record[-1]))
    return scores_by_gene
def write_output(name, list_to_write):
    '''Write an output file from a list.

    Each record is written as one tab-separated line to
    '3_<name-without-extension>_parsed.tsv' in the current directory.
    '''
    output_file_name = '3_{}_parsed.tsv'.format(name[:-4])
    # BUG FIX: the original used a Python 2 print *statement*, which is a
    # SyntaxError under the python3 shebang this script declares.
    print('Output saved as: {}'.format(output_file_name))
    with open(output_file_name, 'w') as output_file:
        for line in list_to_write:
            output_line = '\t'.join(map(str, line))
            output_file.write(output_line + '\n')
def main():
    """Parse blast output, map FBpp ids to FBgn ids, and write additive bit
    scores per unique (mel gene, hit gene) pair.

    argv[1]: blastp tabular output; argv[2]/argv[3]: Dmel / species
    FBpp-to-FBgn mapping files.
    """
    blast_list , source_file_name = line_list_from_input (sys.argv[1])
    m_dict = FBpp_to_FBgn_dict (sys.argv[2])
    s_dict = FBpp_to_FBgn_dict (sys.argv[3])
    # column 0 = Dmel protein, column 1 = other-species protein
    blast_list = replace_FBpp_w_FBgn (m_dict , blast_list , 0)
    blast_list = replace_FBpp_w_FBgn (s_dict , blast_list , 1)
    unique_mel_genes = column_value_unique_list (blast_list , 0)
    output_list = []
    for mel_gene in unique_mel_genes:
        # Split remaining records into this gene's subset and the rest;
        # shrinking blast_list each pass avoids rescanning handled rows.
        blast_subset , new_blast_list = [] , []
        for line in blast_list:
            if line[0] == mel_gene:
                blast_subset.append(line)
            else:
                new_blast_list.append(line)
        blast_list = new_blast_list
        blast_dict = make_blast_dict (blast_subset)
        for blast in blast_subset:
            # additive bit score: sum over all HSPs against the same hit gene
            blast.append(sum(blast_dict[blast[1]]))
            if blast[0:2] + [blast[-1]] not in output_list:
                output_list.append(blast[0:2] + [blast[-1]])
    write_output(source_file_name , output_list)
# print (time.time()-start_time)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| [
"Matt.Kanke@gmail.com"
] | Matt.Kanke@gmail.com |
2e9784d9f5133d131dcf95aad42a8e25daf9771b | 9f97c42310f47505eda2b5d6be28294dee7f0f15 | /test/functional/wallet_import_with_label.py | 4e5aebfaae9ccc25061c1b6fdfc3a6030877d25e | [
"MIT"
] | permissive | Madurajaya/cicoin | b7bc3cd65ef665e8c23d6787bb732d211b46e4f3 | b48b11574ae38ae063670a755b9d50ef6960e1e8 | refs/heads/master | 2022-04-13T21:04:57.846103 | 2020-04-01T05:30:32 | 2020-04-01T05:30:32 | 296,742,986 | 1 | 0 | MIT | 2020-09-18T22:37:12 | 2020-09-18T22:37:12 | null | UTF-8 | Python | false | false | 4,903 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Cicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the behavior of RPC importprivkey on set and unset labels of
addresses.
It tests different cases in which an address is imported with importaddress
with or without a label and then its private key is imported with importprivkey
with and without a label.
"""
from test_framework.test_framework import CicoinTestFramework
from test_framework.wallet_util import test_address
class ImportWithLabel(CicoinTestFramework):
    """Functional test: label interaction of importaddress / importprivkey.

    Node 0 generates keys/addresses; node 1 imports them watch-only first
    and then with the private key, checking the resulting label each time.
    """
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        """Main test logic"""
        # Case 1: importaddress with label, then importprivkey without one.
        self.log.info(
            "Test importaddress with label and importprivkey without label."
        )
        self.log.info("Import a watch-only address with a label.")
        address = self.nodes[0].getnewaddress()
        label = "Test Label"
        self.nodes[1].importaddress(address, label)
        test_address(self.nodes[1],
                     address,
                     iswatchonly=True,
                     ismine=False,
                     label=label)
        self.log.info(
            "Import the watch-only address's private key without a "
            "label and the address should keep its label."
        )
        priv_key = self.nodes[0].dumpprivkey(address)
        self.nodes[1].importprivkey(priv_key)
        test_address(self.nodes[1],
                     address,
                     label=label)
        # Case 2: importaddress without label, then importprivkey with one.
        self.log.info(
            "Test importaddress without label and importprivkey with label."
        )
        self.log.info("Import a watch-only address without a label.")
        address2 = self.nodes[0].getnewaddress()
        self.nodes[1].importaddress(address2)
        test_address(self.nodes[1],
                     address2,
                     iswatchonly=True,
                     ismine=False,
                     label="")
        self.log.info(
            "Import the watch-only address's private key with a "
            "label and the address should have its label updated."
        )
        priv_key2 = self.nodes[0].dumpprivkey(address2)
        label2 = "Test Label 2"
        self.nodes[1].importprivkey(priv_key2, label2)
        test_address(self.nodes[1],
                     address2,
                     label=label2)
        # Case 3: labels supplied on both calls; the later one wins.
        self.log.info("Test importaddress with label and importprivkey with label.")
        self.log.info("Import a watch-only address with a label.")
        address3 = self.nodes[0].getnewaddress()
        label3_addr = "Test Label 3 for importaddress"
        self.nodes[1].importaddress(address3, label3_addr)
        test_address(self.nodes[1],
                     address3,
                     iswatchonly=True,
                     ismine=False,
                     label=label3_addr)
        self.log.info(
            "Import the watch-only address's private key with a "
            "label and the address should have its label updated."
        )
        priv_key3 = self.nodes[0].dumpprivkey(address3)
        label3_priv = "Test Label 3 for importprivkey"
        self.nodes[1].importprivkey(priv_key3, label3_priv)
        test_address(self.nodes[1],
                     address3,
                     label=label3_priv)
        # Case 4: importing the key must not copy the label onto the new
        # destinations derived from the same key (p2sh-segwit embedded addr).
        self.log.info(
            "Test importprivkey won't label new dests with the same "
            "label as others labeled dests for the same key."
        )
        self.log.info("Import a watch-only p2sh-segwit address with a label.")
        address4 = self.nodes[0].getnewaddress("", "p2sh-segwit")
        label4_addr = "Test Label 4 for importaddress"
        self.nodes[1].importaddress(address4, label4_addr)
        test_address(self.nodes[1],
                     address4,
                     iswatchonly=True,
                     ismine=False,
                     label=label4_addr,
                     embedded=None)
        self.log.info(
            "Import the watch-only address's private key without a "
            "label and new destinations for the key should have an "
            "empty label while the 'old' destination should keep "
            "its label."
        )
        priv_key4 = self.nodes[0].dumpprivkey(address4)
        self.nodes[1].importprivkey(priv_key4)
        embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
        test_address(self.nodes[1],
                     embedded_addr,
                     label="")
        test_address(self.nodes[1],
                     address4,
                     label=label4_addr)
        self.stop_nodes()
# Entry point used by the functional-test runner.
if __name__ == "__main__":
    ImportWithLabel().main()
| [
"cicxcoin2@gmail.com"
] | cicxcoin2@gmail.com |
28983ad35bba438daa2553a1003ba96695c3d775 | 745b63bdfb798f88d4f1b7679f435e43e6f2aec1 | /pomodoro.py | 1a109425e5adc3ef44c4f3e24efc89c1d45024fb | [] | no_license | torjeikenes/pomodoro | 009bad75d2f0decca722d892253bd80266cabc85 | 07ccade38090f34b028e1e562c41e7a1bd77c836 | refs/heads/master | 2023-01-13T05:25:52.126481 | 2020-11-25T22:30:49 | 2020-11-25T22:30:49 | 316,047,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,070 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Distributed under terms of the MIT license.
"""
Program for handling pomodoro timers
"""
import json
import argparse
from datetime import datetime, timedelta
import copy
import os
import subprocess
from pathlib import Path
# Module-level state shared by all commands.
home = str(Path.home())
# Log file: one JSON object per line, one entry per timer.
file = home+'/bin/data.json'
datetimeFormat = '%Y-%m-%dT%H:%M:%S.%f'
# Default lengths in minutes.
pomodoroLen = 25
breakLen = 5
# Template entry copied for every new timer.
x = {
    'start': 0,
    'end' : 0,
    'length' : pomodoroLen,
    'type' : 'none'
}
def main():
    """Dispatch the parsed command-line flags to the matching action."""
    # BUG FIX: `file` is module-level state read by every other function.
    # Without the `global` declaration, `--file` assigned a dead local and
    # the override silently never took effect.
    global file
    if args.file:
        file = args.file
    if args.pomodoro:
        newPomodoro(int(args.pomodoro))
    if args.sbreak:
        newBreak(int(args.sbreak))
    if args.next:
        nextTimer()
    if args.check:
        checkTime()
def nextTimer():
    """Start the opposite timer type after the last one has finished.

    Reads the last JSON line of the log; silently does nothing when the
    file is missing/unreadable or the last timer has not ended yet.
    """
    try:
        with open(file, 'r') as f:
            try:
                lines = f.read().splitlines()
                line = lines[-1]
                data = json.loads(line)
            except Exception as e:
                # Any parse problem (empty file, bad JSON) is surfaced as one error
                raise Exception("Not valid json format")
    except Exception as e:
        # No usable log entry -> nothing to chain; deliberate best-effort.
        return
    # Only chain when the previous timer has an end timestamp recorded.
    if (data['type'] == 'pomodoro') and (data['end'] != 0):
        newBreak(breakLen)
    elif (data['type'] == 'break') and (data['end'] != 0):
        newPomodoro(pomodoroLen)
def newBreak(length):
    """Append a new break entry (started now) to the log file."""
    entry = copy.copy(x)
    entry['start'] = datetime.now().strftime(datetimeFormat)
    entry['type'] = 'break'
    entry['length'] = length
    writeToFile(entry)
def newPomodoro(length):
    """Append a new pomodoro entry (started now, not yet ended) to the log."""
    entry = copy.copy(x)
    entry['start'] = datetime.now().strftime(datetimeFormat)
    entry['type'] = 'pomodoro'
    entry['length'] = length
    entry['end'] = 0
    writeToFile(entry)
def writeToFile(data):
    """Append *data* as one JSON line to the log file."""
    # Mode 'a' creates the file when it is missing, so the previous
    # os.path.exists() check choosing 'w' vs 'a' was redundant (and a
    # TOCTOU race). Behaviour is identical: always append.
    with open(file, 'a') as f:
        f.write(json.dumps(data))
        f.write('\n')
def checkTime():
    """Print a status line '<pomodoros today> <P|B|N> <mm:ss remaining>'.

    When the running timer has expired, fires a notification, stamps its
    'end' time into the log, and rewrites the file.
    """
    try:
        with open(file, 'r') as f:
            try:
                lines = f.read().splitlines()
                line = lines[-1]
                data = json.loads(line)
            except Exception as e:
                raise Exception("Not valid json format")
    except Exception as e:
        # No log / unreadable log: report N/A (status-bar friendly).
        print("N/A")
        return
    cntd = "00:00"
    sumToday = 0
    today = datetime.today().date()
    # Count completed pomodoros started today.
    for l in lines:
        lineData = json.loads(l)
        start = datetime.strptime(lineData['start'], datetimeFormat)
        if (lineData['end'] != 0) and (start.date() == today) and (lineData['type'] == 'pomodoro'):
            sumToday += 1
    # end == 0 means the last timer is still running.
    if data['end'] == 0:
        time = datetime.strptime(data['start'], datetimeFormat)
        endtime = time + timedelta(minutes=int(data['length']))
        now = datetime.now()
        diff = endtime - now
        if endtime < now:
            # Timer expired: notify and persist the end timestamp by
            # rewriting the whole log with the last line updated.
            notify(data)
            data['end'] = endtime.strftime(datetimeFormat)
            lines[-1] = json.dumps(data)
            cntd = "00:00"
            with open(file, 'w') as f:
                f.write('\n'.join(lines) + '\n')
        else:
            # Format the remaining time as MM:SS (drop hours and microseconds).
            cntd = ':'.join(str(diff).split(':')[1:])
            cntd = cntd.split('.')[0]
    # One-letter type tag: P(omodoro), B(reak), or N(one).
    tp = data['type'][0].upper()
    returnString = "{} {} {}".format(sumToday, tp, cntd)
    print(returnString)
def notify(data):
    """Show a desktop notification matching the finished timer's type."""
    messages = {
        'pomodoro': "Time is up!\nTake a break.",
        'break': "Break is over!\nGet back to work.",
    }
    message = messages.get(data['type'], "Time is up")
    subprocess.Popen(['notify-send', message])
    return
if __name__ == '__main__':
    # -p/-b take an optional minute count and fall back to the defaults.
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', '-f', help="Set json file")
    parser.add_argument('--pomodoro', '-p', const=pomodoroLen, nargs='?', help="Start a pomodoro timer")
    parser.add_argument('--sbreak', '-b', const=breakLen, nargs='?',help="Start a break timer")
    parser.add_argument('--check', '-c', action='store_true',help="Check time")
    parser.add_argument('--next', '-n', action='store_true',help="Start next timer")
    args = parser.parse_args()
    main()
| [
"torje.n.eikenes@gmail.com"
] | torje.n.eikenes@gmail.com |
0704b14dc207bdeb9c69726cce59cb935ea707cc | b4752cce5d753784c4eb9c742079da6b9df50ab3 | /news_aggregator_api/save_data.py | 94fe61a818410e9c72dd9bddb4623d699044a6ac | [] | no_license | verain1/Conzu | 105458b33719f8e8304d25a74a06c8fd546b5693 | d61441db2af3d05c3b8cbbd01336b3dfc49f9f9f | refs/heads/main | 2023-07-25T08:20:13.186590 | 2021-08-13T19:14:59 | 2021-08-13T19:14:59 | 395,057,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from ndtv_scraper import get_ndtv_data # ndtv scraper
from toi_scraper import get_toi_data # toi scraper
import pandas as pd
import numpy as np
import os
import random
# Scrape both sources and merge the article records into one DataFrame.
data1 = get_ndtv_data()
data2 = get_toi_data()
full_data = data1 + data2
full_data = pd.DataFrame(full_data)
full_data = full_data.dropna()
# Keep every 4th article as a smaller sample set.
filtered_data = full_data.iloc[0::4]
# NOTE(review): machine-specific output roots; only linux_path is used below.
linux_path = '/home/ansh/'
windows_path = 'C:/news_1/'
#os.system('cd ..')
full_data.to_csv(linux_path+'news_aggregator/articles.csv')
filtered_data.to_csv(linux_path+'news_aggregator/filtered.csv')
print(full_data) | [
"anshchadha9211@gmail.com"
] | anshchadha9211@gmail.com |
2e9630b46c62bf6ed75120e758ba48e9ba4e9aa3 | 2f86ac5ea6b2781c30a031f8e9bb02ccbe4bac57 | /ch05_external_testing_tools_calculator/test.py | ea55796af7459989924e7bc802d143f5f01b570d | [] | no_license | KatharinaWiedmann/module3_Business_Python | 183945bd0017c15f38b5d800fb89a8361bae6860 | fbe4b8ab0a903ea3a713a5f6b79b9dba7cce94c4 | refs/heads/master | 2020-04-18T03:21:54.050910 | 2019-02-07T12:16:51 | 2019-02-07T12:16:51 | 167,195,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 30 09:55:46 2019
@author: Katharina
"""
import unittest
from calculator import Calculator
class TddInPythonExample(unittest.TestCase):
    """Unit tests for Calculator.add: correct sums and input validation."""
    def setUp(self):
        # A fresh Calculator per test keeps the cases independent.
        self.calc = Calculator()
    def test_calculator_add_method_returns_correct_result(self):
        self.assertEqual(self.calc.add(2, 2), 4)
    def test_calculator_returns_error_message_if_both_args_no_numbers(self):
        self.assertRaises(ValueError, self.calc.add, 'two', 'three')
    def test_calculator_returns_error_message_if_x_not_number(self):
        self.assertRaises(ValueError, self.calc.add, 'two', 3)
    def test_calculator_returns_error_message_if_y_not_number(self):
        self.assertRaises(ValueError, self.calc.add, 2, 'three')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"katie.wiedmann@gmx.de"
] | katie.wiedmann@gmx.de |
18a62f5f58f3eacf0f4b6e83ac4fda4770a77484 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/StructuredProductsDealPackage/FPythonCode/SP_ModuleReload.py | ed019b05682e9d07250ac27a96aa65a7a6824bdd | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,569 | py |
# Need to add
# Additional Info
# - Participation (DealPackage)
# - CapitalProtection (DealPackage)
# - StrikePricePct (Instrument)
# - BarrierLevelPct (Instrument)
# - ProductQuantity (Deal Package)
# - AccumulatorLeverage
# Exotic Events
# - Initial Fixing
# ChoiceLists
# - AccDec (Val Group)
# - accDecModelDesc (Valuation Extension)
import importlib

# Reload every Structured Products module so that code changes are picked up
# in the running session.  Order matters: helpers first, then composite
# components, then the deal packages and valuation code built on top of them
# (this list preserves the original reload order exactly).
_MODULES_IN_RELOAD_ORDER = [
    'SP_DealPackageHelper',
    'SP_BusinessCalculations',
    'CompositeComponentBase',
    'CompositeExoticEventComponents',
    'CompositeExoticComponents',
    'CompositeOptionAdditionComponents',
    'CompositeCashFlowComponents',
    'CompositeOptionComponents',
    'CompositeBasketComponents',
    'CompositeBasketOptionComponents',
    'CompositeTradeComponents',
    'StructuredProductBase',
    'Validation_BarrierReverseConvertible',
    'SP_BarrierReverseConvertible',
    'SP_CapitalProtectedNote',
    'SP_EqStraddle',
    'SP_CallPutSpread',
    'SP_DualCurrencyDeposit',
    'SP_WeddingCakeDeposit',
    'SP_AccumulatorSetup',
    'SP_AccumulatorCustomInsDef',
    'SP_AccumulatorValuation',
    'SP_AccumulatorModel',
    'SP_AccumulatorDealPackage',
    'SP_Autocall',
    'SP_CapitalProtectedCertificate',
    'SP_CustomTradeActions',
    'SP_InvokeTradeActions',
    'CustomLifeCycleEvents',
]

for _name in _MODULES_IN_RELOAD_ORDER:
    # import_module + reload reproduces each original `import X` /
    # `importlib.reload(X)` pair; rebinding into globals() keeps every
    # module name available from this namespace exactly as before.
    globals()[_name] = importlib.reload(importlib.import_module(_name))
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
3884eb54e7e03a3ef48250ac38e73501f51b3ad0 | 55b3948a9f3b9ecc55800ee20f703693057d4577 | /code47.py | d168af513bc50186ca75f6fc67ad6ff413de98ee | [] | no_license | bommankondapraveenkumar/PYWORK | 31b1c4edfb3e34a7f4103435f77a25814623b891 | 099bc260b80b1d724d46b714df8c931e037ee420 | refs/heads/main | 2023-01-07T16:52:25.915269 | 2020-11-11T06:40:16 | 2020-11-11T06:40:16 | 311,883,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | def horoscope():
M=input("enter the month and date :\n")
S=M.split()
month=S[0]
day=int(S[1])
print("YOUR ZODIAC SIGN IS:")
if(month=="december"):
if(day>21):
print("Capricorn")
else:
print("sagittarius")
elif(month=="january"):
if(day>19):
print("aquarius")
else:
print("capricorn")
elif(month=="february"):
if(day>19):
print("Pisces")
else:
print("aquarius")
elif(month=="march"):
if(day>20):
print("Aries")
else:
print("Pisces")
elif(month=="april"):
if(day>19):
print("Taurus")
else:
print("Aries")
elif(month=="may"):
if(day>20):
print("Gemini")
else:
print("Taurus")
elif(month=="june"):
if(day>20):
print("cancer")
else:
print("Gemini")
elif(month=="july"):
if(day>22):
print("Leo")
else:
print("cancer")
elif(month=="august"):
if(day>22):
print("Virgo")
else:
print("Leo")
elif(month=="september"):
if(day>22):
print("Libra")
else:
print("Virgo")
elif(month=="october"):
if(day>22):
print("Scorpio")
else:
print("Lobra")
elif(month=="november"):
if(day>21):
print("Sagittarius")
else:
print("Scorpio")
else:
print("please enter a valid month and date")
horoscope() | [
"noreply@github.com"
] | noreply@github.com |
b609de5a340b8ffa5463bb61550b269014cc97d8 | 673d55fe4fee15b4047bf0248f5ab5b8a72e3907 | /face_recognition/face_recognizer_opencv.py | 98e5ed9efda71000ebb7c22af5f5f0330e202bd7 | [] | no_license | manuel-lang/BlackForestHackathon | bfb08e66f59144792c66bd116976037eb0000c51 | d521d553974b1533f567f1e63f50f3f633022e1b | refs/heads/master | 2021-07-12T03:16:46.823216 | 2017-10-09T10:07:58 | 2017-10-09T10:07:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,624 | py | import cv2
import os
import numpy as np
# LBPH recognizer shared by train()/predict(); trained in place by train().
lbph_rec = cv2.face.LBPHFaceRecognizer_create()
# Index in this list == numeric label produced by the recognizer; slot 0 unused.
subjects = ["", "Manuel Lang", "Marius Bauer", "Tobias Oehler", "Jerome Klausmann"]
def detect_faces(img):
    """Detect all frontal faces in a BGR image.

    Returns a list of (face_gray, rect) tuples: face_gray is the grayscale
    crop of one detected face, rect its (x, y, w, h) bounding box.  Returns
    an empty list when no face is found (the original returned the
    inconsistent pair (None, None), which callers only tolerated by
    accident, and popped a debug imshow window per face).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface.xml')
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
    detections = []
    for (x, y, w, h) in faces:
        # NOTE(review): the crop uses w for rows and h for columns, kept
        # from the original; detections are square-ish so this is benign.
        detections.append((gray[y:y + w, x:x + h], (x, y, w, h)))
    return detections
def prepare_training_data(data_folder_path):
    """Load training faces and labels from data_folder_path.

    Expects one sub-directory per subject named 's<label>' (e.g. 's1');
    returns parallel lists (faces, labels) usable by the LBPH trainer.
    """
    dirs = os.listdir(data_folder_path)
    faces = []
    labels = []
    for dir_name in dirs:
        # Only directories following the 's<number>' naming convention.
        if not dir_name.startswith("s"):
            continue;
        label = int(dir_name.replace("s", ""))
        subject_dir_path = os.path.join(data_folder_path, dir_name)
        subject_images_names = os.listdir(subject_dir_path)
        for image_name in subject_images_names:
            # Skip hidden/system files such as .DS_Store.
            if image_name.startswith("."):
                continue;
            image_path = os.path.join(subject_dir_path, image_name)
            image = cv2.imread(image_path)
            # Show progress to the operator while training data is collected.
            cv2.imshow("Training on image...", cv2.resize(image, (400, 500)))
            cv2.waitKey(100)
            for val in detect_faces(image):
                if val is None: continue
                face, rect = val
                if face is not None:
                    faces.append(face)
                    labels.append(label)
    return faces, labels
def draw_rectangle(img, rect):
    """Draw a green (x, y, w, h) bounding box onto img in place."""
    (x, y, w, h) = rect
    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
def draw_text(img, text, x, y):
    """Draw a green label onto img in place, anchored at (x, y)."""
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
def predict(test_img):
    """Return a copy of test_img with every recognized face boxed and named.

    Uses the module-level trained recognizer `lbph_rec` and the `subjects`
    label table.
    """
    img = test_img.copy()
    for val in detect_faces(img):
        if val is None: continue
        face, rect = val
        # predict() returns (label_index, confidence); only the index is used.
        label = lbph_rec.predict(face)
        label_text = subjects[label[0]]
        draw_rectangle(img, rect)
        draw_text(img, label_text, rect[0], rect[1]-5)
    return img
return img
def train():
    """Train the shared LBPH recognizer from the 'training' directory."""
    faces, labels = prepare_training_data("training")
    print("Training classifier ...")
    lbph_rec.train(faces, np.array(labels))
    print("Finished training ...")
def test():
    """Run the recognizer over the bundled sample images.

    Each annotated image opens in its own window; press any key to advance.
    (Replaces five copy-pasted read/show/wait blocks with one loop.)
    """
    cases = [
        ('detection', 'test/2.jpg'),
        ('detection-jerome', 'test/jerome.jpg'),
        ('detection-tobias', 'test/tobias.jpg'),
        ('detection-marius', 'test/marius.jpg'),
        ('detection-manu', 'test/manu.jpg'),
    ]
    for title, path in cases:
        img = cv2.imread(path)
        cv2.imshow(title, predict(img))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def show_webcam(mirror=False):
    """Live-recognize faces from the default webcam until Esc is pressed.

    mirror: flip each frame horizontally before display.
    """
    cam = cv2.VideoCapture(0)
    while True:
        ret_val, img = cam.read()
        if not img is None:
            if not ret_val: continue
            if mirror:
                img = cv2.flip(img, 1)
            try:
                cv2.imshow('detection', predict(img))
            except:
                # Fall back to the raw frame if detection/recognition fails.
                cv2.imshow('detection', img)
        # 27 == Esc key code.
        if cv2.waitKey(1) == 27:
            break
    cv2.destroyAllWindows()
def main():
    """Train the recognizer and run it over the sample images."""
    train()
    test()
    #show_webcam(mirror=False)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
"manuellang183@gmail.com"
] | manuellang183@gmail.com |
f0d46ede1b3ecc911d9874cce0d0c7cca9e0d770 | 15f94f7b66d33ca1e80ad2cb2c7821fb3c4ca453 | /DataDash/DataModel/apps.py | 41c9d627581b199ca0003cbc6487fb6cb78ab27f | [
"MIT"
] | permissive | DS921020/AnalysisManager | 570fe2b08ba413e0616a057897c34fd2d4415c22 | e16d6fff807738b644174da73d15ddb2bb9f9ac4 | refs/heads/main | 2023-03-03T14:46:34.718085 | 2020-12-10T05:01:44 | 2020-12-10T05:01:44 | 320,133,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from django.apps import AppConfig
class TestmodelConfig(AppConfig):
name = 'DataModel'
| [
"ds110293@163.com"
] | ds110293@163.com |
ede721e55e9c3c008214b8d056bfb0e827d08a68 | ea55badf6640e807a4ed50190290dfe97db06e6c | /scikit-learn/Iris.py | 8637a2173705c4c36614a414f69cc2576ff10938 | [] | no_license | Natsu-Yuki/PythonCode | 31b7f161c8dfc05ac36a5dec9b9bab9b5f4b5b86 | 0cf856d33b008b811a3747a98a6224e5b3e3af30 | refs/heads/master | 2020-03-29T12:03:20.869303 | 2018-09-22T14:32:41 | 2018-09-22T14:32:41 | 149,882,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
# Load the iris data, split train/test, and fit a 1-nearest-neighbour model.
iris_dataset = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris_dataset['data'], iris_dataset['target'], random_state=0)
knn=KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
# Single hand-made sample used to demonstrate a prediction.
X_new =np.array([[5,2.9,1,0.2]])
prediction=knn.predict(X_new)
y_pre=knn.predict(X_test)
def acquaintance_data():
    """Print an overview of the loaded iris dataset (keys, names, samples).

    Fixes the "isir_dataset" typo present in every output line.
    """
    print("Keys of iris_dataset:\n{}".format(iris_dataset.keys()) + "\n\n.........")
    print("Target names of iris_dataset:\n{}".format(iris_dataset['target_names']) + "\n\n.........")
    print("Feature names of iris_dataset:\n{}".format(iris_dataset['feature_names']) + "\n\n.........")
    print("Data of iris_dataset:\n{}".format(iris_dataset['data'][:5]) + "\n\n.........")
    print("Target of iris_dataset:\n{}".format(iris_dataset['target'][:5]) + "\n\n.........")
def train_test_data():
    """Print the shapes of the train/test splits defined at module level."""
    print("X_train shape:{}".format(X_train.shape))
    print("X_test shape:{}".format(X_test.shape))
    print("y_train shape:{}".format(y_train.shape))
    print("y_test shape:{}".format(y_test.shape))
def scatter_plot():
    """Draw a pairwise scatter matrix of the training features, coloured by class."""
    iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset['feature_names'])
    # pandas.scatter_matrix was deprecated in 0.20 and removed in 0.25;
    # the supported entry point is pandas.plotting.scatter_matrix.
    pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',
                               hist_kwds={'bins': 20}, s=60, alpha=0.8)
)
def main():
    """Print the single-sample prediction and the test-set accuracy."""
    print('\n')
    #print(knn.fit(X_train,y_train))
    print("Prediction :{}".format(prediction))
    print("Prediction target name:{}".format(iris_dataset['target_names'][prediction]))
    # NOTE(review): "preditions" typo in the output string left as-is here.
    print("Test set preditions:{}\n".format(y_pre))
    # Two equivalent accuracy computations: manual mean vs. knn.score().
    print("Test set score:{:.2f}".format(np.mean(y_pre==y_test)))
    print("Test set score:{:.2f}".format(knn.score(X_test,y_test)))
main()
| [
"ynatsu233@gmail.com"
] | ynatsu233@gmail.com |
69a73772a221b1d1fc46f63870acf9ab7b9d268f | 76a269c93a79b156240d9a2568bd2eee7258622f | /naive_bayes.py | 28a6c01619e26c4273617271c4be1ed825789eb7 | [] | no_license | omarn33/Spam-Email-Classifier | f6bfeb3e1c66363b49af086004c42bb0d6c4ef2c | 3b52c4fa7dbf45bd1aeabb9fb51183c92af2628b | refs/heads/master | 2023-06-28T10:20:06.620304 | 2021-08-03T04:47:33 | 2021-08-03T04:47:33 | 392,191,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,132 | py | # naive_bayes.py
# ---------------
# Licensing Information: You are free to use or extend this projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the University of Illinois at Urbana-Champaign
#
# Created by Justin Lizama (jlizama2@illinois.edu) on 09/28/2018
import numpy as np
from collections import Counter
"""
This is the main entry point for Part 1 of this MP. You should only modify code
within this file for Part 1 -- the unrevised staff files will be used for all other
files and classes when code is run, so be careful to not modify anything else.
"""
def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):
    """
    train_set - List of list of words corresponding with each email
    example: suppose I had two emails 'like this movie' and 'i fall asleep' in my training set
             Then train_set := [['like','this','movie'], ['i','fall','asleep']]

    train_labels - List of labels corresponding with train_set
    example: Suppose I had two emails, first one was ham and second one was spam.
             Then train_labels := [1, 0]

    dev_set - List of list of words corresponding with each email that we are testing on
              It follows the same format as train_set

    smoothing_parameter - The smoothing parameter --laplace (1.0 by default)
    pos_prior - positive prior probability (between 0 and 1)

    Returns the predicted label (1 = ham, 0 = spam) for every email in dev_set.
    """
    # *----Train Model----*
    # Frequency of every word seen in ham (label 1) / spam (label 0) emails.
    ham_word_counter = Counter()
    spam_word_counter = Counter()
    # Predicted development-set labels, filled in below.
    dev_labels = []
    # Tally word frequencies per class.
    for label, email_words in zip(train_labels, train_set):
        if label == 1:
            ham_word_counter.update(email_words)
        else:
            spam_word_counter.update(email_words)
    # Display frequency
    print("Ham Word Counter:")
    print(ham_word_counter.most_common(10))
    print()
    print("Spam Word Counter:")
    print(spam_word_counter.most_common(10))
    print()
    # Totals before smoothing (debug output only).
    ham_total_words = sum(ham_word_counter.values())
    spam_total_words = sum(spam_word_counter.values())
    print("Total Number of Words in Ham Emails BEFORE Laplace:")
    print(ham_total_words)
    print()
    print("Total Number of Words in Spam Emails BEFORE Laplace:")
    print(spam_total_words)
    print()
    # Every dev-set word must exist in both counters (frequency 0 when
    # unseen) so the smoothed likelihood lookups below never KeyError.
    for email_words in dev_set:
        for word in email_words:
            if word not in ham_word_counter:
                ham_word_counter[word] = 0
            if word not in spam_word_counter:
                spam_word_counter[word] = 0
    # BUG FIX: the original printed counter[len(counter) - 1], i.e. looked
    # up an *integer key* in a Counter of words, which always prints 0.
    # The intended value is the smallest stored frequency.
    print("Smallest Ham Word Frequency:")
    print(min(ham_word_counter.values()) if ham_word_counter else 0)
    print()
    print("Smallest Spam Word Frequency:")
    print(min(spam_word_counter.values()) if spam_word_counter else 0)
    print()
    # Word -> count tables that get smoothed and converted to log-probs.
    ham_word_probability = ham_word_counter.copy()
    spam_word_probability = spam_word_counter.copy()
    print("Ham Word Probability BEFORE Laplace:")
    # print(ham_word_probability)
    print()
    print("Spam Word Probability BEFORE Laplace:")
    # print(spam_word_probability)
    print()
    # Laplace smoothing: add k to every word count (summing afterwards makes
    # the denominator total + k * vocabulary_size automatically).
    for word in ham_word_probability:
        ham_word_probability[word] += smoothing_parameter
    for word in spam_word_probability:
        spam_word_probability[word] += smoothing_parameter
    print("Laplace Constant:")
    print(smoothing_parameter)
    print()
    print("Ham Word Probability AFTER Laplace:")
    # print(ham_word_probability)
    print()
    print("Spam Word Probability AFTER Laplace:")
    # print(spam_word_probability)
    print()
    ham_word_total = sum(ham_word_probability.values())
    spam_word_total = sum(spam_word_probability.values())
    print("Total Number of Words in Ham Emails AFTER Laplace:")
    print(ham_word_total)
    print()
    print("Total Number of Words in Spam Emails AFTER Laplace:")
    print(spam_word_total)
    print()
    # Convert smoothed counts to log-likelihoods; logs avoid underflow when
    # many per-word probabilities are combined.
    for word in ham_word_probability:
        ham_word_probability[word] = np.log(ham_word_probability[word] / ham_word_total)
    for word in spam_word_probability:
        spam_word_probability[word] = np.log(spam_word_probability[word] / spam_word_total)
    # Log priors: log(P(Ham)) and log(P(Spam)).
    likelihood_of_ham = np.log(pos_prior)
    likelihood_of_spam = np.log(1.0 - pos_prior)
    # *----Test Model----*
    for email_words in dev_set:
        likelihood_email_is_ham = likelihood_of_ham
        likelihood_email_is_spam = likelihood_of_spam
        for word in email_words:
            likelihood_email_is_ham += ham_word_probability[word]
            likelihood_email_is_spam += spam_word_probability[word]
        # MAP decision: choose the class with the larger posterior likelihood.
        dev_labels.append(1 if likelihood_email_is_ham > likelihood_email_is_spam else 0)
    print("Development Labels:")
    print(dev_labels)
    print()
    # return predicted labels of development set
    return dev_labels
| [
"omarnaeem333@gmail.com"
] | omarnaeem333@gmail.com |
4fe810f0f0f672f8136173ab9c58da8afa0a8929 | a986754144d9f1db1ce5ac6d86c164ae1667ed3e | /cuenta/migrations/0002_auto_20210207_0918.py | 83d183b5e01b44efee8e7e6c538f4a1bf01f5032 | [] | no_license | mateo9516/ChatPublicoDjango | 4b2bcc7eb75ed7fb5a73ab6927bdd2c11bbdc376 | 746c13a3ff48cf69bd3ff1d1f9ea9b24a4e909b0 | refs/heads/master | 2023-03-05T01:28:03.678399 | 2021-02-07T17:43:27 | 2021-02-07T17:43:27 | 336,840,304 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # Generated by Django 2.2.15 on 2021-02-07 14:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the is_admin / is_staff / is_superuser boolean flags to Cuenta.

    All three fields share the same definition, so they are generated from
    a single template in a comprehension.
    """

    dependencies = [('cuenta', '0001_initial')]

    operations = [
        migrations.AddField(
            model_name='cuenta',
            name=flag_name,
            field=models.BooleanField(default=False),
        )
        for flag_name in ('is_admin', 'is_staff', 'is_superuser')
    ]
| [
"mateo.echeverry96@gmail.com"
] | mateo.echeverry96@gmail.com |
ffb75083105752e6e34ddf33fd1f369d3dcae145 | 0a8ef50b8dd8e5a843e6fe3e6692eeefbad9fd84 | /Student9Week/Singleton.py | ff48188b22072087709a6281b2f2310b3621f9a3 | [] | no_license | VitaliyKrytsun/Student | 7e84e66e5ea14dbaced6c46a7e9af17d67c981ff | 451cc4dbb4d2fb8f78f63e6a9d712b807d9c74dc | refs/heads/master | 2020-08-17T07:39:01.441525 | 2019-12-10T11:58:17 | 2019-12-10T11:58:17 | 215,633,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # metaclass
# class Singleton(type):
# __cls_instances = {}
# def __call__(cls, *args, **kwargs):
# if cls not in Singleton.__cls_instances:
# Singleton.__cls_instances[cls] = super().__call__(*args, **kwargs)
# return Singleton.__cls_instances[cls]
# class A(metaclass=Singleton):
# pass
# a1 = A()
# a2 = A()
# print(id(a1) == id(a2))
# class
# class A:
# __instances = None
# def __new__(cls, *args, **kwargs):
# if A.__instances is None:
# A.__instances = super().__new__(cls, *args, **kwargs)
# return A.__instances
# a1 = A()
# a2 = A()
# print(id(a1) == id(a2))
# decorator
def Singleton(cls):
    """Class decorator that makes ``cls`` produce a single shared instance.

    The first call to the decorated class constructs the instance; every
    later call returns that same object.
    """
    instances = {}
    def wrapper(*args, **kwargs):
        # Create the instance lazily on first use, then reuse it.
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        # Bug fix: the original returned the whole registry dict instead of
        # the cached instance, so callers never received an object of `cls`.
        return instances[cls]
    return wrapper
# Demo: constructing the singleton class twice yields one shared object.
@Singleton
class A:
    pass

first = A()
second = A()
print(first is second)  # identity check — equivalent to comparing id()s
print(type(A))          # A is now the wrapper function returned by Singleton
| [
"56654633+VitaliyKrytsun@users.noreply.github.com"
] | 56654633+VitaliyKrytsun@users.noreply.github.com |
92390ff097f0dc700869fdfc84c2e3606ee46f1d | 2260c05c1fae664b7a6395b6b8e2c5ad5a61eb4b | /driver.py | 66222accfc43a640d831c08077a3bc31dd4579d5 | [] | no_license | roulaoregan/neural_networks | 05bb3e9572303c3b68cdaa2d710645cd5061bf70 | 9a158ab264fd12bb6b5175786d333ea9b574f332 | refs/heads/master | 2021-01-01T19:11:58.407523 | 2014-02-06T00:34:18 | 2014-02-06T00:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | '''
Main driver for Boltzmann machine
'''
import logger
import os
import re
import sys
def main(argv=None):
    """Entry point for the Boltzmann machine driver.

    Currently a stub: returns None, which sys.exit() maps to exit status 0.
    """
    pass


# Bug fix: the original guard compared the *string* '__name__' against
# '__main__', which is always False, so main() could never run.
if __name__ == '__main__':
    sys.exit(main())
| [
"roula.oregan@gmail.com"
] | roula.oregan@gmail.com |
620a896d4a884a98be6bc854d50f98a8b7d210d7 | d85fa999d626ccab2523c8c551cc2f7eb100571c | /Task2E.py | 91ec4b688e91583a21c1d8c811c9a09eb5c5d1c4 | [
"MIT"
] | permissive | swan11jf/CUED-Flood-Warning-Project | dcb4f412525b576fe1e8cd89aadf09920d14fe1b | 93636615ee85eb4ed5ba0ef7414bdbedccc0bcb4 | refs/heads/main | 2023-02-01T20:26:21.449331 | 2020-12-21T11:15:18 | 2020-12-21T11:15:18 | 323,312,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from floodsystem.stationdata import build_station_list
from floodsystem.stationdata import update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.flood import stations_highest_rel_level
from floodsystem.plot import plot_water_levels
import datetime
def run():
    """Plot the last 10 days of water levels for the five stations with the
    highest relative level."""
    all_stations = build_station_list()
    update_water_levels(all_stations)
    history_window = datetime.timedelta(days=10)
    for monitored in stations_highest_rel_level(all_stations, 5):
        dates, levels = fetch_measure_levels(monitored.measure_id, history_window)
        plot_water_levels(monitored, dates, levels)


if __name__ == '__main__':
    print("*** Task 2E: CUED Part IA Flood Warning System ***")
    run()
"noreply@github.com"
] | noreply@github.com |
c913f8fbfe5cfedb9004bb0dd5b99c11a599285b | 485ffbd9a08f72a4ecae63d1695fb82dccc6f195 | /tm/tw3.py | 0b12ecf535fc659e7a489babd72ce638c6387c22 | [] | no_license | ka9epedia/test | 77850d64ae2dc6c1032deebaf43f11b87276da2e | 02b9c43335fc058b9fda936c2b119614c99eb7df | refs/heads/master | 2020-04-17T07:36:12.112061 | 2019-01-18T09:18:07 | 2019-01-18T09:18:07 | 154,784,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,522 | py | # coding: utf-8
from requests_oauthlib import OAuth1Session
from requests.exceptions import ConnectionError, ReadTimeout, SSLError
import json, datetime, time, pytz, re, sys, traceback, pymongo
from pymongo import MongoClient
from collections import defaultdict
from pprint import pprint
import numpy as np
import unicodedata
import MeCab as mc
import collections
import json
# SECURITY NOTE(review): live-looking Twitter API credentials are hard-coded
# below.  They should be revoked and loaded from the environment or a config
# file kept out of version control.
KEYS = {
    'consumer_key': 'U6OCU525mGe27DntCYQnIlp70',
    'consumer_secret': 'mZeQ8HdILVbnZB3lRQJht1T8gB7yKmQMnJkkUMLGoLtDHvr6Qn',
    'access_token': '875272026281332737-nrx6TzruwZs7Pge90SXaAD89bxAbRoF',
    'access_secret': 'wxSlu6NaXEhYpst7SeHL2fJLAh0a5McWzfL0zq6LLTbWg'
}

# Module-level handles, populated by initialize() below.
twitter = None
connect = None
db = None
tweetdata = None
meta = None

# Accumulators used by the per-tweet analysis loop further down.
freqwords = {}
freqpair = {}
# NOTE(review): this shadows the built-in max() for the rest of the module.
max = 0
noun_score = 0
verb_score = 0
adjective_score = 0
noun_score_c, adjective_score_c, verb_score_c, adverb_score_c = 0, 0, 0, 0
# Connection setup for the Twitter API and MongoDB.
def initialize():
    """Create the OAuth1 Twitter session and the MongoDB collection handles,
    storing them in the module-level globals declared above."""
    global twitter, connect, db, tweetdata, meta
    twitter = OAuth1Session(KEYS['consumer_key'], KEYS['consumer_secret'],
                            KEYS['access_token'], KEYS['access_secret'])
    connect = MongoClient('localhost', 27017)
    db = connect.okymrestaurant
    #db = connect.anal1  # alternative database, kept for reference
    tweetdata = db.tweetdata
    meta = db.metadata

initialize()
# Preprocessing for sentiment (positive/negative) analysis with the
# pn_ja polarity dictionary: each line is "word:reading:POS:score".
noun_words, adjective_words, verb_words, adverb_words = [], [], [], []
noun_point, adjective_point, verb_point, adverb_point = [], [], [], []
pn = open('/home/odalab/Desktop/kankou/tm/pn_corpus/pn_ja.dic.txt', 'r')
# NOTE(review): the two denominators differ (49963 vs 49983) — presumably
# one is a typo; confirm against the corpus size.
positive_weight = 44861.0 / 49963.0 #1.0
negative_weight = 5122.0 / 49983.0
for line in pn:
    line = line.rstrip()
    x = line.split(':')
    if abs(float(x[3])) > 0: # keep only entries with a non-zero polarity score
        if x[2] == '名詞':
            noun_words.append(x[0])
            noun_point.append(x[3])
        if x[2] == '形容詞':
            adjective_words.append(x[0])
            adjective_point.append(x[3])
        if x[2] == '動詞':
            verb_words.append(x[0])
            verb_point.append(x[3])
        if x[2] == '副詞':
            adverb_words.append(x[0])
            adverb_point.append(x[3])
pn.close()
# Tweet search against the Twitter REST API.
def getTweetData(search_word):
    """Run a tweet search and return the parsed results plus rate-limit info.

    On HTTP 200 returns a dict with metadata/statuses and the rate-limit
    headers; otherwise returns {"result": False, "status_code": ...}.
    """
    global twitter
    url = 'https://api.twitter.com/1.1/search/tweets.json'
    req = twitter.get(url, params={'q': search_word, 'count': '100'})
    if req.status_code != 200:
        # Request failed — report the HTTP status to the caller.
        return {
            "result": False,
            "status_code": req.status_code
        }
    # Request succeeded: unpack the payload and the rate-limit headers.
    timeline = json.loads(req.text)
    limit = req.headers.get('x-rate-limit-remaining', 0)
    reset = req.headers.get('x-rate-limit-reset', 0)
    return {
        "result": True,
        "metadata": timeline['search_metadata'],
        "statuses": timeline['statuses'],
        "limit": limit,
        "reset_time": datetime.datetime.fromtimestamp(float(reset)),
        "reset_time_unix": reset
    }
# Parse a Twitter UTC timestamp string into a JST-localised datetime.
def str_to_date_jp(str_date):
    """Convert e.g. 'Wed Jun 14 12:00:00 +0000 2017' to Asia/Tokyo time."""
    parsed = datetime.datetime.strptime(str_date, '%a %b %d %H:%M:%S +0000 %Y')
    return pytz.utc.localize(parsed).astimezone(pytz.timezone('Asia/Tokyo'))
# Current local time as seconds since the epoch (whole-second precision).
def now_unix_time():
    """Return the current UNIX timestamp, truncated to whole seconds."""
    return time.mktime(time.localtime())
# Fetch place/shop tweets and store them in MongoDB.
res = getTweetData(u'岡山市')
if res['result'] == False:
    # The request failed.
    # NOTE(review): execution continues below and will KeyError on
    # res['limit'], which only exists on success — TODO confirm intent.
    print("Error! status code: {0:d}".format(res['status_code']))

if int(res['limit']) == 0:
    # Rate limit reached (data may still have been returned).
    print("API制限に達したっぽい")
else:
    print("API LIMIT:", res['limit'])

    if len(res['statuses']) == 0:
        # Search returned 0 results (an exception should probably be raised).
        pass
    else:
        # Store the search metadata and each tweet into MongoDB.
        meta.insert({"metadata": res['metadata'], "insert_date": now_unix_time()})
        for st in res['statuses']:
            tweetdata.insert(st)
def mecab_analysis(sentence):
    """Morphologically analyse `sentence` with MeCab (NEologd dictionary).

    Returns a defaultdict(list) mapping part-of-speech (名詞/形容詞/動詞,
    as unicode) to the list of base-form words found.  Python 2: the text
    is encoded to UTF-8 bytes for MeCab and decoded again on the way out.
    """
    t = mc.Tagger('-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd/')
    sentence = sentence.replace('\n', ' ')
    text = sentence.encode('utf-8')
    node = t.parseToNode(text)
    result_dict = defaultdict(list)
    for i in range(140): # tweets are at most 140 characters
        if node.surface != "": # skip the BOS/EOS header and footer nodes
            word_type = node.feature.split(",")[0]
            if word_type in ["名詞", "形容詞", "動詞"]:
                # Feature field 6 is the base (dictionary) form; '*' = none.
                plain_word = node.feature.split(",")[6]
                if plain_word != "*":
                    result_dict[word_type.decode('utf-8')].append(plain_word.decode('utf-8'))
        node = node.next
        if node is None:
            break
    return result_dict
all_words_list = []

# Morphologically analyse every stored tweet, accumulate per-POS word
# lists, sentiment scores, occurrence and co-occurrence counts, and
# Simpson coefficients.
# NOTE(review): the sentiment loops below index the *_point lists with
# `cnt`, which is not defined until the report loops later in this body —
# this raises NameError on the first matching word; `s_cnt` was
# presumably intended.  Also, `words_list[i] == freqwords` compares a
# string against a dict (always False), so counts never exceed 1.
for d in tweetdata.find({},{'_id':1, 'id':1, 'text':1, 'noun':1, 'verb':1, 'adjective':1}, no_cursor_timeout=True, timeout=False):
    freqwords = {}
    freqpair = {}
    max = 0
    res = mecab_analysis(unicodedata.normalize('NFKC', d['text'])) # half-width kana -> full-width
    words_list = []
    hozon_list = {}
    freqp_word = []
    # Split words into per-part-of-speech fields
    # and count word occurrences.
    for k in res.keys():
        if k == u'形容詞': # adjective
            adjective_list = []
            for w in res[k]:
                words_list.append(w)
                all_words_list.append(w)
                adjective_list.append(w)
                words_cnt = collections.Counter(words_list)
                adjective_cnt = collections.Counter(adjective_list)
                # positive/negative sentiment scoring
                s_cnt = 0
                for i in adjective_words:
                    if w == i:
                        if adjective_point[s_cnt] >= 0:
                            adjective_score += float(adjective_point[cnt]) * float(positive_weight)
                            adjective_score_c = float(adjective_point[cnt]) * float(positive_weight)
                        else:
                            adjective_score += float(adjective_point[cnt]) * float(negative_weight)
                            adjective_score_c = float(adjective_point[cnt]) * float(negative_weight)
                    s_cnt += 1
                    #print res[k]
                    #print w, noun_score
                    #print w,i
                    #print "test"
                    #print all_words_list
                print str(w.encode('utf-8')), str(adjective_score)
                hozon_list[w] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果(総和)': adjective_score, u'ポジネガ分析結果(単体)': adjective_score_c, u'共起頻度': 0}
            tweetdata.update({'_id' : d['_id']},{'$push': {'adjective':{'$each':adjective_list}}})
        elif k == u'動詞': # verb
            verb_list = []
            for w in res[k]:
                words_list.append(w)
                all_words_list.append(w)
                verb_list.append(w)
                words_cnt = collections.Counter(words_list)
                verb_cnt = collections.Counter(verb_list)
                # positive/negative sentiment scoring
                s_cnt = 0
                for i in verb_words:
                    if w == i:
                        if verb_point[s_cnt] >= 0:
                            verb_score += float(verb_point[cnt]) * float(positive_weight)
                            verb_score_c = float(verb_point[cnt]) * float(positive_weight)
                        else:
                            verb_score += float(verb_point[cnt]) * float(negative_weight)
                            verb_score_c = float(verb_point[cnt]) * float(negative_weight)
                    s_cnt += 1
                    #print res[k]
                    #print w, noun_score
                    #print w,i
                print str(w.encode('utf-8')), verb_score
                hozon_list[w] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果(総和)': verb_score, u'ポジネガ分析結果(単体)': verb_score_c, u'共起頻度': 0}
            tweetdata.update({'_id' : d['_id']},{'$push': {'verb':{'$each':verb_list}}})
        elif k == u'名詞': # noun
            noun_list = []
            for w in res[k]:
                words_list.append(w)
                all_words_list.append(w)
                noun_list.append(w)
                words_cnt = collections.Counter(words_list)
                noun_cnt = collections.Counter(noun_list)
                # positive/negative sentiment scoring
                s_cnt = 0
                for i in noun_words:
                    if w == i:
                        if noun_point[s_cnt] >= 0:
                            noun_score += float(noun_point[cnt]) * float(positive_weight)
                            noun_score_c = float(noun_point[cnt]) * float(positive_weight)
                        else:
                            noun_score += float(noun_point[cnt]) * float(negative_weight)
                            noun_score_c = float(noun_point[cnt]) * float(negative_weight)
                    s_cnt += 1
                    #print res[k]
                    #print w, noun_score
                    #print w,i
                    #no_noun += 1
                print str(w.encode('utf-8')), str(noun_score)
                hozon_list[w] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果(総和)': noun_score, u'ポジネガ分析結果(単体)': noun_score_c, u'共起頻度': 0}
            tweetdata.update({'_id' : d['_id']},{'$push': {'noun':{'$each':noun_list}}})
        #elif k == u'副詞': # adverb
        #    adverb_list = []
        #    for w in res[k]:
        #        words_list.append(w)
        #        adverb_list.append(w)
        #        words_cnt = collections.Counter(words_list)
        #        adverb_cnt = collections.Counter(adverb_list)
            # positive/negative sentiment scoring
        #        s_cnt = 0
        #        for i in noun_words:
        #            if w == i:
        #                if adverb_point[s_cnt] >= 0:
        #                    adverb_score += float(adverb_point[cnt]) * float(positive_weight)
        #                else:
        #                    adverb_score += float(adverb_point[cnt]) * float(negative_weight)
        #            s_cnt += 1
            #print res[k]
            #print w, noun_score
        #            print w,i
            #no_noun += 1
        #    tweetdata.update({'_id' : d['_id']},{'$push': {'adverb':{'$each':adverb_list}}})
    # Count co-occurrence frequencies.
    print ("--- 共起頻度 ---")
    for i in range(len(words_list)):
        for j in range(len(freqwords)):
        if words_list[i] == freqwords:
            freqwords[words_list[i]] += 1
        else:
            freqwords[words_list[i]] = 1
        if max < freqwords[words_list[i]]:
            max = freqwords[words_list[i]]
        for j in range(i + 1, len(words_list)):
            if words_list[i] + "\t" + words_list[j] == freqpair:
                freqpair[words_list[i] + "\t" + words_list[j]] += 1
                freqp_word.append(freqpair[words_list[i] + "\t" + words_list[j]])
            else:
                freqpair[words_list[i] + "\t" + words_list[j]] = 1
        hozon_list[words_list[i]] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果': adjective_score, u'共起頻度': freqp_word}
    print max
    print("--- 指定した全品詞の出現頻度 ---")
    for word, cnt in sorted(words_cnt.iteritems(), key=lambda x: x[1], reverse=True):
        print str(word.encode('utf-8')), cnt
        # serialise to JSON
        print(json.dumps(word,
            indent=4,
            ensure_ascii=False,
            sort_keys=True)),
        print ", ",
        print(json.dumps(cnt,
            indent=4,
            ensure_ascii=False,
            sort_keys=True))
    #f = open('output-okayama.json', 'w')
    #json.dump(word, f, indent=4)
    print("--- 名詞の出現頻度 ---")
    for word, cnt in sorted(noun_cnt.iteritems(), key=lambda x: x[1], reverse=True):
        print str(word.encode('utf-8')), cnt
        #print(json.dumps(word,
        #    indent=4,
        #    ensure_ascii=False,
        #    sort_keys=True)),
        #print ", ",
        #print(json.dumps(cnt,
        #    indent=4,
        #    ensure_ascii=False,
        #    sort_keys=True))
    print("--- 動詞の出現頻度 ---")
    for word, cnt in sorted(verb_cnt.iteritems(), key=lambda x: x[1], reverse=True):
        print str(word.encode('utf-8')), cnt
        #print(json.dumps(word,
        #    indent=4,
        #    ensure_ascii=False,
        #    sort_keys=True)),
        #print ", ",
        #print(json.dumps(cnt,
        #    indent=4,
        #    ensure_ascii=False,
        #    sort_keys=True))
    print("--- 形容詞の出現頻度 ---")
    for word, cnt in sorted(adjective_cnt.iteritems(), key=lambda x: x[1], reverse=True):
        print str(word.encode('utf-8')), cnt
        #print(json.dumps(word,
        #    indent=4,
        #    ensure_ascii=False,
        #    sort_keys=True)),
        #print ", ",
        #print(json.dumps(cnt,
        #    indent=4,
        #    ensure_ascii=False,
        #    sort_keys=True))
    # Compute Simpson coefficients from the word and pair frequencies.
    simp = {}
    for key, value in freqpair.iteritems():
        if freqpair[key] == 1:
            continue
        p = re.compile('^([^\t]+)\t([^\t]+)$')
        m = p.search(key)
        if m == None:
            continue
        if freqwords[m.group(1)] < freqwords[m.group(2)]:
            simpson = float(value) / float(freqwords[m.group(1)])
        else:
            simpson = float(value) / float(freqwords[m.group(2)])
        if simpson < 0.1:
            continue
        simp[key] = simpson
    print "%s" % max
    for key, value in freqwords.iteritems():
        print "%s\t%s" % (key, value)
    for key, value in simp.iteritems():
        print "%s\t%s" % (key, value)
    f = open('output-okayama.json', 'w')
    json.dump(all_words_list, f, indent=4)
    f = open('output-okayama-simpson.json', 'w')
    json.dump(simp, f, indent=4)
| [
"kagepedia@gmail.com"
] | kagepedia@gmail.com |
a98c0f87c5e54efc98415dca9576d0bcecc3346f | aae551baa369fda031f363c2afbdf1984467f16d | /Machine_Learning/Programming_Assignments/CS15B001_PA3/Code/q2/bernoulli.py | 59000649f234d836785dc85871bffe40b30ef448 | [] | no_license | ameet-1997/Course_Assignments | 37f7d4115baec383ccf029772efcf9c33beb2a23 | 629e9d5cfc6fa6cf37a96c5fcc33bc669cbdc59d | refs/heads/master | 2021-05-16T16:23:32.731296 | 2018-02-03T05:57:01 | 2018-02-03T05:57:01 | 119,939,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | import pandas as pd
import numpy as np
from scipy import sparse
import os
import functions
import time
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_fscore_support
from tabulate import tabulate
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Load the data: a dense document-term matrix plus spam/ham labels.
data_matrix = functions.load_sparse_csr('data_sparse').todense()
labels_matrix = np.loadtxt('labels.csv', delimiter=',')

# Cross Validation: 5-fold Bernoulli Naive Bayes, averaging the
# precision/recall/F-score of the positive ("ham") class, index 1.
kf = KFold(n_splits=5)
counter = 0
[avr_prec, avr_rec, avr_fsc] = [.0,.0,.0]
for train_index, test_index in kf.split(data_matrix):
    counter += 1
    data_train, data_test = data_matrix[train_index], data_matrix[test_index]
    labels_train, labels_test = labels_matrix[train_index], labels_matrix[test_index]

    b = BernoulliNB()
    b.fit(data_train, labels_train)
    predicted_labels = b.predict(data_test)

    # The hand-rolled Bernoulli NB below was replaced by sklearn's
    # BernoulliNB above; kept for reference.
    # # Estimate the class priors
    # spam_prior = float(np.count_nonzero(labels_train == 0))/labels_train.shape[0]
    # ham_prior = float(np.count_nonzero(labels_train == 1))/labels_train.shape[0]

    # # Estimate the conditional probabilities
    # # Get all spam articles and get the column sum
    # # Do the same for all ham articles
    # # Add-1 smoothing is performed here
    # cond_ham = ((np.count_nonzero(data_train[labels_train==1], axis=0)+1).astype(dtype=float))/(data_train[labels_train==1].shape[0]+2)
    # cond_spam = ((np.count_nonzero(data_train[labels_train==0], axis=0)+1).astype(dtype=float))/(data_train[labels_train==0].shape[0]+2)

    # # Using log so that there are no underflow problems
    # predicted_labels = np.ones(shape=labels_test.shape, dtype=float)
    # for i in range(predicted_labels.shape[0]):
    # 	score_ham = np.sum(np.multiply(np.log(cond_ham), data_test[i,:]))+np.log(ham_prior)
    # 	score_spam = np.sum(np.multiply(np.log(cond_spam), data_test[i,:]))+np.log(spam_prior)
    # 	if score_spam > score_ham:
    # 		predicted_labels[i] = 0
    # 	else:
    # 		predicted_labels[i] = 1

    # print("Fold Number "+str(counter))
    [prec,rec,fsc,sup] = precision_recall_fscore_support(labels_test, predicted_labels)
    avr_prec += prec[1]
    avr_rec += rec[1]
    avr_fsc += fsc[1]
    # print tabulate([prec, rec, fsc], headers=['Spam', 'Ham'])
    # print("")

# Averages over the 5 folds (the divisor matches n_splits above).
print("")
print("Average Scores for Spam Class")
print("Precision: "+str(avr_prec/5))
print("Recall: "+str(avr_rec/5))
print("FScore: "+str(avr_fsc/5))

# Plot the PR Curves from a fresh 33% hold-out split.
train_data, test_data, train_labels, test_labels = train_test_split(data_matrix, labels_matrix, test_size=0.33, random_state=42)
m = BernoulliNB()
m.fit(train_data, train_labels)
probab = m.predict_proba(test_data)
precision_, recall_, threshold_ = precision_recall_curve(test_labels, probab[:,1])
fig = plt.figure()
fig.suptitle('Precision Recall Curve')
ax = fig.add_subplot(111)
ax.set_xlabel('Precision')
ax.set_ylabel('Recall')
# ax.fill(precision_,np.zeros(shape=precision_.shape),'b')
p = [0]
r = [1]
p.extend(list(precision_))
r.extend(list(recall_))
ax.fill(p, r,'b', zorder=5)
plt.plot(p, r)
plt.show()
"ameetsd97@gmail.com"
] | ameetsd97@gmail.com |
d5d6859ced095e77ba26501b5cb023a48cdd6535 | 8d6ba22866a97b551de7ecccea75769c055afded | /HW/hw8_q4.py | 496c50712582022223828e7fd6ce1a850528c3bc | [] | no_license | adibsxion19/CS1114 | c52b8815ea3841e0e47a2442cceb5e9c5a81806a | 0425d3d96664ee7a5ef88e146d51759e4a0bf50f | refs/heads/main | 2023-04-10T23:30:31.427021 | 2021-04-06T05:03:50 | 2021-04-06T05:03:50 | 348,604,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,354 | py | # Author: Aadiba Haque
# Assignment / Part: HW8 - Q4
# Date due: 2020-04-24
# I pledge that I have completed this assignment without
# collaborating with anyone else, in conformance with the
# NYU School of Engineering Policies and Procedures on
# Academic Misconduct.
def clean_data(complete_data_filename, cleaned_data_filename):
    """Copy selected columns (2, 3, 4, 7, 8, 9) of a CSV into a new file.

    Each input row is reduced to the six columns of interest and written
    comma-separated, one row per line.  The output keeps the original
    format's trailing blank line, which downstream readers already skip.

    Fixes over the original: blank input lines are skipped instead of
    raising IndexError, and both files are closed even on error by using
    context managers.
    """
    kept_columns = (2, 3, 4, 7, 8, 9)
    buffered = ''
    with open(complete_data_filename, "r") as complete_data:
        for line in complete_data:
            line = line.strip()
            if line == '':
                # Robustness fix: a blank line used to raise IndexError.
                continue
            values = line.split(',')
            buffered += ','.join(values[i] for i in kept_columns) + '\n'
    with open(cleaned_data_filename, "w") as cleaned_data:
        # print() appends one extra newline, matching the original output.
        print(buffered, file=cleaned_data)
def convert_date_time_to_edt(cleaned_data_filename, edt_file_name):
    #sig: string, string
    #UTC is 4 hours ahead of EDT
    # Rewrites the cleaned CSV so that the combined "last update" column is
    # split into separate date and time columns shifted from UTC to EDT,
    # and the first two columns are swapped.  The whole file is buffered in
    # `output` and written once at the end.
    # Assumes the timestamp looks like "M/D/YY H:MM" — TODO confirm against
    # the actual data file.
    cleaned_data = open(cleaned_data_filename,"r")
    edt_file = open(edt_file_name,"w")
    counter = 0
    output = ''
    lst_of_indices = (1,0,2,3,4,5)  # output column order (cols 0 and 1 swapped)
    for lines in cleaned_data:
        lines = lines.strip()
        if lines == '':
            continue
        lst_values = lines.split(',')
        if counter == 0:
            # Header row: swap the first two labels and replace the single
            # "last update" header with separate date/time headers.
            temp = lst_values[1]
            lst_values[1] = lst_values[0]
            lst_values[0] = temp
            lst_values[2] = "Last Update Date"
            lst_values.insert(3,"Last Update Time (EDT)")
            output += ','.join(lst_values)
        else:
            for index in lst_of_indices:
                if index == 2:
                    # Split "date time" and convert the hour from UTC to EDT.
                    last_update = lst_values[2].split()
                    date = last_update[0]
                    time = last_update[1].split(':')
                    if int(time[0]) < 4:
                        # UTC hour < 4 means the EDT date is the previous day.
                        # NOTE(review): decrementing the day numerically
                        # produces day "0" on the 1st of a month — month and
                        # year boundaries are not handled.
                        date_lst = date.split('/')
                        date_lst[1] = str(int(date_lst[1])- 1)
                        output += '/'.join(date_lst) + ','
                    else:
                        output += date + ','
                    last_update_time = int(time[0]) - 4
                    if last_update_time < 0:
                        last_update_time += 24
                    time[0] = str(last_update_time)
                    output += ':'.join(time) + ','
                else:
                    output += lst_values[index] + ','
        counter += 1
        # Strip the trailing comma of the row just appended, then terminate it.
        output = output.strip(',')
        output += '\n'
    print(output, file=edt_file)
    cleaned_data.close()
    edt_file.close()
def print_percentages_per_location(location, data_filename, type):
    """Report the share of `type` outcomes ("death" or "recovered") among
    confirmed cases, for every data row matching `location`.

    Rows where the outcome count meets or exceeds the confirmed count are
    reported as 100%; unknown `type` values are treated as zero outcomes.
    """
    with open(data_filename, "r") as source:
        for row_number, raw_line in enumerate(source):
            if row_number == 0:
                continue  # skip the header row
            stripped = raw_line.strip()
            if not stripped:
                continue  # tolerate blank lines (e.g. at end of file)
            columns = stripped.split(',')
            if columns[1] != location:
                continue
            confirmed = int(columns[4])
            if type == "death":
                occurrences = int(columns[5])
            elif type == "recovered":
                occurrences = int(columns[6])
            else:
                occurrences = 0
            if occurrences >= confirmed:
                print("There were {} {} of {} confirmed cases, or approximately 100.000%".format(occurrences, type, confirmed))
            else:
                share = occurrences / confirmed
                print("There were {} {} of {} confirmed cases, or approximately {:.3%}".format(occurrences, type, confirmed, share))
def difference_in_cases(location1, location2, edt_file_name):
    """Print the first non-zero confirmed-case count found for each of two
    states/provinces and the absolute difference between them.

    A location that never appears in the file (or appears only with zero
    confirmed cases) counts as 0.
    """
    import math

    cases1 = 0
    cases2 = 0
    with open(edt_file_name, "r") as source:
        first_row = True
        for raw in source:
            if first_row:
                first_row = False  # header row never counts
                continue
            raw = raw.strip()
            if not raw:
                continue  # tolerate blank lines
            fields = raw.split(',')
            # Record only the first non-zero occurrence for each location.
            if fields[1] == location1 and cases1 == 0:
                cases1 = int(fields[4])
            elif fields[1] == location2 and cases2 == 0:
                cases2 = int(fields[4])
            if cases1 and cases2:
                break  # both found — no need to read further
    difference = int(math.fabs(cases1 - cases2))
    print("{}'s confirmed cases: {} \n{}'s confirmed cases: {} \nDifference: {}".format(location1, cases1, location2, cases2, difference))
def main():
    """Clean the raw COVID data file, convert its timestamps to EDT, print a
    sample recovery percentage, then interactively compare two locations."""
    complete_data_filename = "03-25-2020.csv"
    cleaned_data_filename = "CleanedCovidData.csv"
    clean_data(complete_data_filename, cleaned_data_filename)
    edt_file_name = "NewCovidData.csv"
    convert_date_time_to_edt(cleaned_data_filename, edt_file_name)
    location = "New York"
    type = 'recovered'
    print_percentages_per_location(location, edt_file_name, type)
    print("What two states'/provinces' number of confirmed cases would you like to compare? ") #asking user names of states to compare their number of confirmed cases
    location1 = input() #name of first state assigned to variable location1
    location2 = input() #name of second state assigned to variable location2
    difference_in_cases(location1, location2,edt_file_name) #Calls the function and gives the difference between the number of confirmed cases for location1 and location2

main()
| [
"noreply@github.com"
] | noreply@github.com |
3c1b9ff25bff83a5b2ab154d29fca1246527a50a | 5a18af4dd1eb7244ed6b75b8a59c29f7360cf468 | /pose_regression/models/top_models.py | 077581c48b4e505abe1ebd662626cb4a4f5d9cca | [] | no_license | asiron/deep-camera-relocalization | b585ef1b3ce63b307fcc57979eaf01462268a82c | 9277826c605be9a28deff81261dbc68b721c7ae4 | refs/heads/master | 2020-03-18T22:19:36.813331 | 2018-10-20T20:36:01 | 2018-10-20T20:36:01 | 135,338,870 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,466 | py | from keras.layers import (
Dropout, Dense, LSTM, ELU, GRU, CuDNNGRU, CuDNNLSTM, Lambda,
TimeDistributed, Activation, Bidirectional,
Reshape, Concatenate, PReLU, BatchNormalization)
from keras.regularizers import l2
from .layers import QuaternionNormalization
import keras.backend as K
import tensorflow as tf
class TopModel(object):
  """Base head model: stores build kwargs and emits its input unchanged."""

  def __init__(self, **kwargs):
    self.kwargs = kwargs

  def build(self, input_tensor):
    # Identity layer, named so downstream code can locate the output.
    identity = Lambda(lambda t: t, name='prediction')
    return identity(input_tensor)
class Regressor(TopModel):
  """Fully-connected pose head: FC -> dropout -> 7-dim pose whose quaternion
  part is normalized."""

  def build(self, input_tensor):
    # Expect a flat (or rank-2) feature tensor.
    assert len(input_tensor._keras_shape[1:]) in [1, 2]
    hidden = Dense(self.kwargs['units'],
      activation='relu',
      kernel_regularizer=l2(self.kwargs['l2']))(input_tensor)
    regularized = Dropout(self.kwargs['dropout'])(hidden)
    pose = Dense(7)(regularized)
    normalized = QuaternionNormalization(name='quat_norm')(pose)
    return super(Regressor, self).build(normalized)
class SpatialLSTM(TopModel):
  """Head that reshapes FC features into a 64x32 grid and scans it with four
  directional LSTMs (top-down, bottom-up, left-right, right-left)."""

  def build(self, input_tensor):
    # NOTE(review): `is 1` relies on CPython small-int identity — should be
    # `== 1` (cannot be changed in a doc-only pass).
    assert len(input_tensor._keras_shape[1:]) is 1
    dense_1 = Dense(2048,
      activation='relu',
      kernel_regularizer=l2(self.kwargs['l2']))(input_tensor)
    rect_shape = (64, 32)
    dropout_1 = Dropout(self.kwargs['dropout'])(dense_1)
    reshaped = Reshape(rect_shape)(dropout_1)
    # K.reverse with axes=1 flips the first non-batch axis of the grid;
    # permute_dimensions swaps the two grid axes (batch axis stays first).
    reshaped_reversed = Lambda(lambda x: K.reverse(x, axes=1))(reshaped)
    transposed = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(reshaped)
    transposed_reversed = Lambda(lambda x: K.reverse(x, axes=1))(transposed)
    # One LSTM per scan direction; only the final state is kept.
    lstm_top_down = CuDNNLSTM(rect_shape[0], return_sequences=False)(reshaped)
    lstm_bottom_up = CuDNNLSTM(rect_shape[0], return_sequences=False)(reshaped_reversed)
    lstm_left_right = CuDNNLSTM(rect_shape[1], return_sequences=False)(transposed)
    lstm_right_left = CuDNNLSTM(rect_shape[1], return_sequences=False)(transposed_reversed)
    merged = Concatenate()([
      lstm_left_right, lstm_right_left,
      lstm_top_down, lstm_bottom_up
    ])
    dense_2 = Dense(7)(merged)
    quat_norm = QuaternionNormalization(name='quat_norm')(dense_2)
    return super(SpatialLSTM, self).build(quat_norm)
class StatefulLSTM(TopModel):
  """Stateful recurrent head: a stateful GRU over the sequence followed by a
  per-timestep 7-dim pose output with quaternion normalization."""

  def build(self, input_tensor):
    # Sequences of flat feature vectors: (batch, time, features).
    assert len(input_tensor.shape[2:]) == 1
    lstm_units = self.kwargs['units']  # read for config validation; width below is fixed at 512
    recurrent = CuDNNGRU(512, return_sequences=True, stateful=True)(input_tensor)
    pose = TimeDistributed(Dense(7))(recurrent)
    normalized = TimeDistributed(QuaternionNormalization(name='quat_norm'))(pose)
    passthrough = TimeDistributed(Lambda(lambda x: x, name='inner_prediction'), name='prediction')
    return passthrough(normalized)
class StandardLSTM(TopModel):
  """Bidirectional recurrent head: forward and time-reversed LSTMs are
  concatenated, then a per-timestep 7-dim pose with quaternion norm."""

  def build(self, input_tensor):
    # Sequences of flat feature vectors: (batch, time, features).
    assert len(input_tensor.shape[2:]) == 1
    lstm_units = self.kwargs['units']  # read for config validation; widths below are fixed
    projected = TimeDistributed(Dense(1024))(input_tensor)
    projected_reversed = TimeDistributed(Lambda(lambda x: K.reverse(x, axes=1)))(projected)
    forward = CuDNNLSTM(512, return_sequences=True)(projected)
    backward = CuDNNLSTM(512, return_sequences=True)(projected_reversed)
    bidirectional = Concatenate(axis=-1)([forward, backward])
    regularized = Dropout(self.kwargs['dropout'])(bidirectional)
    pose = TimeDistributed(Dense(7))(regularized)
    normalized = TimeDistributed(QuaternionNormalization(name='quat_norm'))(pose)
    passthrough = TimeDistributed(Lambda(lambda x: x, name='inner_prediction'), name='prediction')
    return passthrough(normalized)
# Removed: a module-level triple-quoted string literal holding an abandoned
# bidirectional/spatial-LSTM experiment.  It was dead code — an unassigned
# string expression evaluated and discarded at import time.
''' | [
"maciej.zurad@gmail.com"
] | maciej.zurad@gmail.com |
58e2029cc20575a0699ac989d2bd2bceb0f0ad0d | 5c14e3a42410b386b8a062ad5c8ef4d35b54c10e | /LabQuestion4.py | b809de618049996ab3598d3ff07ddbd8829a6e23 | [] | no_license | CStratton00/CST-215-Programming-Assignments | ae158504dca1b1dbf85b73cb8f6967f353c6e0ca | cc72b91380dd2dec23ed1adce8461b3d399ce34e | refs/heads/main | 2023-02-27T13:42:29.074624 | 2021-02-10T18:16:06 | 2021-02-10T18:16:06 | 337,812,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | A = True
B = True
def APT(): return "T" if(A == True) else "F"
def BPT(): return "T" if(B == True) else "F"
def abAND(): return "T" if(A and B) else "F"
def abOR(): return "T" if(A or B) else "F"
def abNAND(): return "T" if(not(A and B)) else "F"
def abNOR(): return "T" if(not(A or B)) else "F"
def abXOR(): return "T" if(A != B) else "F"
def aNOT(): return "T" if(not(A)) else "F"
# Print a truth table for each two-input gate by mutating the global
# inputs A and B through all four combinations (T,T / T,F / F,T / F,F).
# AND gate
print("<----------------------------->")
print("|          And Gate           |")
print("<----------------------------->")
print("| A = " + APT() + ", B = " + BPT() + "  |  A and B = " + abAND() + "  |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A and B = " + abAND() + "  |")
A = False
B = True
print("| A = " + APT() + ", B = " + BPT() + "  |  A and B = " + abAND() + "  |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A and B = " + abAND() + "  |")
print("<----------------------------->")

# OR gate (inputs reset to T,T first)
A = True
B = True
print("<----------------------------->")
print("|          OR Gate            |")
print("<----------------------------->")
print("| A = " + APT() + ", B = " + BPT() + "  |  A or B = " + abOR() + "   |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A or B = " + abOR() + "   |")
A = False
B = True
print("| A = " + APT() + ", B = " + BPT() + "  |  A or B = " + abOR() + "   |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A or B = " + abOR() + "   |")
print("<----------------------------->")

# NAND gate
A = True
B = True
print("<----------------------------->")
print("|          NAND Gate          |")
print("<----------------------------->")
print("| A = " + APT() + ", B = " + BPT() + "  |  A nand B = " + abNAND() + " |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A nand B = " + abNAND() + " |")
A = False
B = True
print("| A = " + APT() + ", B = " + BPT() + "  |  A nand B = " + abNAND() + " |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A nand B = " + abNAND() + " |")
print("<----------------------------->")

# NOR gate
A = True
B = True
print("<----------------------------->")
print("|          NOR Gate           |")
print("<----------------------------->")
print("| A = " + APT() + ", B = " + BPT() + "  |  A nor B = " + abNOR() + "  |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A nor B = " + abNOR() + "  |")
A = False
B = True
print("| A = " + APT() + ", B = " + BPT() + "  |  A nor B = " + abNOR() + "  |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A nor B = " + abNOR() + "  |")
print("<----------------------------->")

# XOR gate
A = True
B = True
print("<----------------------------->")
print("|          XOR Gate           |")
print("<----------------------------->")
print("| A = " + APT() + ", B = " + BPT() + "  |  A xor B = " + abXOR() + "  |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A xor B = " + abXOR() + "  |")
A = False
B = True
print("| A = " + APT() + ", B = " + BPT() + "  |  A xor B = " + abXOR() + "  |")
B = False
print("| A = " + APT() + ", B = " + BPT() + "  |  A xor B = " + abXOR() + "  |")
print("<----------------------------->")

# NOT gate: single input A, both values.
A = True
print("<----------------->")
print("|    Not Gate     |")
print("<----------------->")
print("| A = " + APT() + " |  A' = " + aNOT() + "  |")
A = False
print("| A = " + APT() + " |  A' = " + aNOT() + "  |")
print("<----------------->")
"noreply@github.com"
] | noreply@github.com |
46e425071b72856e84300bad5e705cc2c7dff76d | 800b5cd8c3d58b60d80aca551e54af28ec3c9f18 | /code/chapter_05_example_14.py | 81334fc4e4cc158d144cc5ba91bcb59c006f0045 | [] | no_license | CyberLight/two-scoops-of-django-1.8 | 6591347cb20f3c16e252943c04f0f524f8e8b235 | 423971ad609ec9a552617fc4f7424e701295c09b | refs/heads/master | 2021-01-21T03:02:52.704822 | 2015-05-11T16:32:31 | 2015-05-11T16:32:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | """
Using This Code Example
=========================
The code examples provided are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.8. Code samples follow PEP-0008, with exceptions made for the
purposes of improving book formatting. Example code is provided "as is", and
is not intended to be, and should not be considered or labeled as "tutorial code".
Permissions
============
In general, you may use the code we've provided with this book in your programs
and documentation. You do not need to contact us for permission unless you're
reproducing a significant portion of the code or using it in commercial
distributions. Examples:
* Writing a program that uses several chunks of code from this course does not require permission.
* Selling or distributing a digital package from material taken from this book does require permission.
* Answering a question by citing this book and quoting example code does not require permission.
* Incorporating a significant amount of example code from this book into your product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.8, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2015 Two Scoops Press (ISBN-GOES-HERE)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at info@twoscoopspress.org."""
# Top of settings/production.py
import os

# Read the secret from the environment; raises KeyError at import time if
# SOME_SECRET_KEY is not set, so a misconfigured deployment fails immediately.
SOME_SECRET_KEY = os.environ["SOME_SECRET_KEY"]
| [
"danny@eventbrite.com"
] | danny@eventbrite.com |
42f3d2a0282e52d317b7e91d150c762f62c4d55a | 5bd79964e4770770e972551b017d990036c1c600 | /code/generateMeetings.py | d003722bafaaa4618abe9ee18b10542604c4c14b | [
"Apache-2.0"
] | permissive | IIIF/trc | 8d63fd200927d6ad0170a03d8afc2d97cf193b59 | d7226551a3bd081e9ff7518b8037527f9f041c6d | refs/heads/master | 2021-08-04T11:14:03.504771 | 2021-07-21T22:29:23 | 2021-07-21T22:29:23 | 157,777,140 | 1 | 1 | Apache-2.0 | 2020-12-03T12:36:13 | 2018-11-15T21:50:00 | Python | UTF-8 | Python | false | false | 1,795 | py | #!/usr/bin/python
import sys
from ics import Calendar, Event
from datetime import datetime,timedelta
from dateutil import tz
def timezone(timeInstance, timezone):
    """Return the wall-clock time of *timeInstance* expressed in the named *timezone*."""
    localized = timeInstance.astimezone(tz.gettz(timezone))
    return localized.time()
if __name__ == "__main__":
    # Usage: <start_date YYYY-MM-DD> <occurrence count> [Japan-time frequency]
    if len(sys.argv) != 3 and len(sys.argv) != 4:
        print ('Usage:\n\t./code/calendar.py [start_date YYYY-MM-DD] [Occurrence count] [Japan time frequency]')
        sys.exit(0)
    # Every `frequency`-th meeting is shifted to 19:00 Eastern (Japan-friendly);
    # defaults to every 4th meeting when no third argument is given.
    if len(sys.argv) == 3:
        frequency = 4
    else:
        frequency = int(sys.argv[3])
    cal = Calendar()
    # First meeting at noon US Eastern on the supplied date.
    first_meeting = datetime(int(sys.argv[1][0:4]), int(sys.argv[1][5:7]), int(sys.argv[1][8:10]), 12, 0, 0, 0, tz.gettz('America/New_York'))
    occurences = sys.argv[2]
    next_meeting = first_meeting
    for i in range(int(occurences)):
        # Issues go out one week before; voting closes two weeks after.
        issues_shared = next_meeting - timedelta(days=7)
        voting_closes = next_meeting + timedelta(days=7*2)
        if i % frequency == 0 and i != 0:
            meeting_time = next_meeting.replace(hour=19)
        else:
            meeting_time = next_meeting
        timestr = '{} Europe / {} UK / {} US Eastern / {} US Pacific / {} Japan'.format(timezone(meeting_time, 'Europe/Paris'), timezone(meeting_time, 'Europe/London'), timezone(meeting_time, 'America/New_York'), timezone(meeting_time, 'America/Los_Angeles'), timezone(meeting_time, 'Asia/Tokyo'))
        print ('TRC meeting: {} ({}), \nSend out issues: {}, \nVoting closes: {}\n'.format(meeting_time.date(), timestr, issues_shared.date(), voting_closes.date()))
        e = Event()
        e.name = 'IIIF Technical Review Committee'
        # NOTE(review): the event begin uses next_meeting, not the hour-adjusted
        # meeting_time, so the 19:00 shift never reaches the .ics file -- confirm.
        e.begin = next_meeting
        cal.events.add(e)
        # Meetings repeat every four weeks.
        next_meeting += timedelta(days=4*7)
    with open('/tmp/trc.ics', 'w') as ics_file:
        ics_file.writelines(cal)
| [
"glen.robson@gmail.com"
] | glen.robson@gmail.com |
ebfb29af4611b4bc8dec9a2d065d6577a3201c0f | db70c979a9d1002cb2dfe3ea7028957402782fd8 | /tests/test_success_range_below_equal.py | 55f9e2d0ee784cc8aee547cc5960c654ad221f1f | [
"MIT"
] | permissive | Bernardo-MG/wargame_analysis_jupyter_notebook | 739d94b697bf103d0c563d4dcedc9e0fb1890606 | db13838ce0f8c6dcbc160259c1ee0ae258b51ba7 | refs/heads/master | 2022-12-15T12:15:32.798807 | 2020-08-26T06:38:27 | 2020-08-26T06:38:27 | 289,078,376 | 0 | 0 | MIT | 2020-08-26T06:38:28 | 2020-08-20T18:20:02 | Python | UTF-8 | Python | false | false | 8,937 | py | # -*- coding: utf-8 -*-
import unittest
from decimal import Decimal
from scripts.probability import roll_success_range
"""
Tests for roll_success_range with above=False and equal=True (below-or-equal success ranges).
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
class TestZeroToTenBelowEqual(unittest.TestCase):
    """
    Below-or-equal success ranges for rolls in [0, 10].
    """

    def _check(self, goal, expected):
        # Shared assertion for this fixture's roll bounds.
        outcome = roll_success_range(0, 10, goal, above=False, equal=True)
        self.assertEqual(expected, outcome)

    def test_goal_0(self):
        self._check(0, {"min": 0, "max": 0})

    def test_goal_1(self):
        self._check(1, {"min": 0, "max": 1})

    def test_goal_2(self):
        self._check(2, {"min": 0, "max": 2})

    def test_goal_3(self):
        self._check(3, {"min": 0, "max": 3})

    def test_goal_4(self):
        self._check(4, {"min": 0, "max": 4})

    def test_goal_5(self):
        self._check(5, {"min": 0, "max": 5})

    def test_goal_6(self):
        self._check(6, {"min": 0, "max": 6})

    def test_goal_7(self):
        self._check(7, {"min": 0, "max": 7})

    def test_goal_8(self):
        self._check(8, {"min": 0, "max": 8})

    def test_goal_9(self):
        self._check(9, {"min": 0, "max": 9})

    def test_goal_10(self):
        self._check(10, {"min": 0, "max": 10})

    def test_goal_above_max(self):
        # Goals past the roll maximum are clamped to the full range.
        self._check(20, {"min": 0, "max": 10})

    def test_goal_below_min(self):
        # Goals under the roll minimum can never succeed.
        self._check(-1, None)
class TestOneToTenBelowEqual(unittest.TestCase):
    """
    Below-or-equal success ranges for rolls in [1, 10].
    """

    def _check(self, goal, expected):
        # Shared assertion for this fixture's roll bounds.
        outcome = roll_success_range(1, 10, goal, above=False, equal=True)
        self.assertEqual(expected, outcome)

    def test_goal_0(self):
        # Below the roll minimum: no success range exists.
        self._check(0, None)

    def test_goal_1(self):
        self._check(1, {"min": 1, "max": 1})

    def test_goal_2(self):
        self._check(2, {"min": 1, "max": 2})

    def test_goal_3(self):
        self._check(3, {"min": 1, "max": 3})

    def test_goal_4(self):
        self._check(4, {"min": 1, "max": 4})

    def test_goal_5(self):
        self._check(5, {"min": 1, "max": 5})

    def test_goal_6(self):
        self._check(6, {"min": 1, "max": 6})

    def test_goal_7(self):
        self._check(7, {"min": 1, "max": 7})

    def test_goal_8(self):
        self._check(8, {"min": 1, "max": 8})

    def test_goal_9(self):
        self._check(9, {"min": 1, "max": 9})

    def test_goal_10(self):
        self._check(10, {"min": 1, "max": 10})

    def test_goal_above_max(self):
        # Goals past the roll maximum are clamped to the full range.
        self._check(20, {"min": 1, "max": 10})

    def test_goal_below_min(self):
        self._check(-1, None)
class TestTenToOneHundredBelowEqual(unittest.TestCase):
    """
    Below-or-equal success ranges for rolls in [10, 100].
    """

    def _check(self, goal, expected):
        # Shared assertion for this fixture's roll bounds.
        outcome = roll_success_range(10, 100, goal, above=False, equal=True)
        self.assertEqual(expected, outcome)

    def test_no_goal(self):
        self._check(0, None)

    def test_goal_at_max(self):
        self._check(100, {"min": 10, "max": 100})

    def test_goal_at_min(self):
        self._check(10, {"min": 10, "max": 10})

    def test_goal_at_middle(self):
        self._check(50, {"min": 10, "max": 50})

    def test_goal_close_to_max(self):
        self._check(80, {"min": 10, "max": 80})

    def test_goal_above_max(self):
        # Goals past the roll maximum are clamped to the full range.
        self._check(200, {"min": 10, "max": 100})

    def test_goal_just_below_middle(self):
        self._check(40, {"min": 10, "max": 40})

    def test_goal_just_below_max(self):
        self._check(90, {"min": 10, "max": 90})

    def test_goal_below_min(self):
        self._check(5, None)

    def test_goal_just_below_min(self):
        self._check(9, None)
class Test1d6BelowEqual(unittest.TestCase):
    """
    Below-or-equal success ranges for a six-sided die ([1, 6]).
    """

    def _check(self, goal, expected):
        # Shared assertion for the 1d6 roll bounds.
        outcome = roll_success_range(1, 6, goal, above=False, equal=True)
        self.assertEqual(expected, outcome)

    def test_no_goal(self):
        self._check(0, None)

    def test_goal_above_max(self):
        # Goals past the die maximum are clamped to the full range.
        self._check(10, {"min": 1, "max": 6})

    def test_goal_1(self):
        self._check(1, {"min": 1, "max": 1})

    def test_goal_2(self):
        self._check(2, {"min": 1, "max": 2})

    def test_goal_3(self):
        self._check(3, {"min": 1, "max": 3})

    def test_goal_4(self):
        self._check(4, {"min": 1, "max": 4})

    def test_goal_5(self):
        self._check(5, {"min": 1, "max": 5})

    def test_goal_6(self):
        self._check(6, {"min": 1, "max": 6})
class Test1d6Norm0BelowEqual(unittest.TestCase):
    """
    Below-or-equal success ranges for a zero-normalized six-sided die ([0, 5]).
    """

    def _check(self, goal, expected):
        # Shared assertion for the normalized 1d6 roll bounds.
        outcome = roll_success_range(0, 5, goal, above=False, equal=True)
        self.assertEqual(expected, outcome)

    def test_goal_above_max(self):
        # Goals past the die maximum are clamped to the full range.
        self._check(6, {"min": 0, "max": 5})

    def test_goal_0(self):
        self._check(0, {"min": 0, "max": 0})

    def test_goal_1(self):
        self._check(1, {"min": 0, "max": 1})

    def test_goal_2(self):
        self._check(2, {"min": 0, "max": 2})

    def test_goal_3(self):
        self._check(3, {"min": 0, "max": 3})

    def test_goal_4(self):
        self._check(4, {"min": 0, "max": 4})

    def test_goal_5(self):
        self._check(5, {"min": 0, "max": 5})
| [
"programming@bernardomg.com"
] | programming@bernardomg.com |
bb9382cac06758cde9a8cf0d6815e7c641a53b4e | cfe31cde0d64026925c9a5747216ba83856122d8 | /Sex_Determination/parallel_ASEreadcounter_XIST.py | 630398e4cecd81c226a883b9bbf1f9c81b1d5fbe | [
"MIT"
] | permissive | SViswanathanLab/XIST-Males-Scripts | f8d4fe22dee4d90f8a7a343a1138e721f3e5473a | 4f2a0e281296df1dd86e3c68b13192e3337c1e8a | refs/heads/main | 2023-04-11T05:52:21.329845 | 2022-08-20T22:01:28 | 2022-08-20T22:01:28 | 400,788,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | #qsub -t 1:86 submit_script.qsub
import glob, os
# SGE array-job worker: each task processes its own slice of the sample list.
task_id = int(os.getenv("SGE_TASK_ID"))  # 1-based array-task index set by the scheduler
temp_path = "download_dir/" #where files are downloaded to
# Keep only the directory names found under temp_path.
# NOTE(review): os.walk also yields temp_path itself as the first entry, so the
# list starts with a non-sample name -- confirm this offset is intended.
parameters_list = [x[0].split("/")[-1] for x in os.walk(temp_path)]
samples_per_node = 18
# This task's slice of samples, with the end index clamped to the list length.
val = min(task_id*samples_per_node, len(parameters_list))
parameter_sublist = parameters_list[(task_id - 1)*samples_per_node:val]
def runSimulation(parameter):
    # Run GATK ASEReadCounter on this sample's BAM, restricted to chrX.
    # NOTE(review): '$temp_path' inside the command string is neither expanded by
    # Python nor set in the shell environment here -- it likely should be the
    # temp_path variable; confirm against the working pipeline.
    os.system("java -Djava.io.tmpdir=xist_aser -Xmx8000m -jar gatk.jar ASEReadCounter -R Homo_sapiens_assembly38.fasta --read-filter PassesVendorQualityCheckReadFilter --read-filter HasReadGroupReadFilter --read-filter NotDuplicateReadFilter --read-filter MappingQualityAvailableReadFilter --read-filter NotSecondaryAlignmentReadFilter --read-filter MappingQualityReadFilter --minimum-mapping-quality 30 --read-filter OverclippedReadFilter --filter-too-short 25 --read-filter GoodCigarReadFilter --read-filter AmbiguousBaseReadFilter -V hapmap_3.3.hg38.vcf.gz --lenient --seconds-between-progress-updates 100 -I $temp_path/%s/normal.bam -L chrX -O output_dir/%s.out" % (parameter,parameter))
for parameter in parameter_sublist:
    runSimulation(parameter = parameter)
| [
"noreply@github.com"
] | noreply@github.com |
390d44eedc5bd62912d37c37ae5ccbcd9582d8af | 3d6787af8b9bb74b7a80e6b51ea9d64d01455d73 | /opinion.mining.from.online.hotel.reviews.a.text.summerization.approach/models/authorcredibility.py | e18717fcb24c2c86d897c5d80646285928bd5ef2 | [] | no_license | beiranvand-karim/data.mining | bbac24d3ffa93c382cb4b5c250e2d22552d55c8d | 85437e59792c2369581efbe76e0dd0d815f9f4e7 | refs/heads/master | 2020-04-04T23:54:42.834596 | 2018-11-29T12:23:16 | 2018-11-29T12:23:16 | 156,376,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from mongoengine import *
class AuthorCredibility(Document):
    """MongoDB document storing a credibility measure for a review author."""
    # ObjectId of the author this credibility measure belongs to (required).
    authorId = ObjectIdField(required=True)
    # Computed credibility value; optional float.
    measure = FloatField()
| [
"beiranvand.karim@gmail.com"
] | beiranvand.karim@gmail.com |
5c27bedc6a666dd8e3f85a7a92005b76c278ea8c | 51a705c1c3c749cd339ebdfc1997770e9de0f71e | /partie2/partie_2.py | a1844d5e76b369dec7eed1c19e1aa6eb679c29d6 | [] | no_license | jabertSI/Gps-haversin-to-kml | 49c932cadf25fb123d435acdbf3385897d06fc1e | 163e862185e3e1d670ed52c1a6389a06f9f9ec28 | refs/heads/master | 2021-06-12T08:24:45.823195 | 2017-03-10T13:26:44 | 2017-03-10T13:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | from math import radians, cos, sin, atan2, sqrt, ceil
terre = 6371  # Earth's mean radius in km, used by the haversine formula

# READ THE CITY FILE: one city per line as "name latitude longitude altitude"
fichier_ville = 'ville.txt'
with open(fichier_ville, 'r') as f:
    # Build a list of [name, lat, lon, alt] records
    lines = [line.strip('\n') for line in f.readlines()]
    carnet = [line.split() for line in lines] # split each line into fields
# Convert the latitude/longitude strings to floats
print("INFORMATION VILLE :")
for adr in carnet:
    adr[1] = float(adr[1])
    adr[2] = float(adr[2])
    print(adr[0] + ' : latitude = ' + str(adr[1]) + ' : longitude = ' + str(adr[2]) + ': altitude = ' + adr[3] + "m")
for ville1 in carnet:
    ville1[1], ville1[2] = map(radians, [ville1[1], ville1[2]]) # convert degrees to radians
distances = []
# Generate every ordered pair of distinct cities
print("DISTANCE ENTRE LES VILLES :")
for ville1 in carnet:
    for ville2 in carnet:
        if ville1 != ville2:
            # Great-circle distance between the two cities.
            lat = ville2[1] - ville1[1] # delta latitude
            lng = ville2[2] - ville1[2] # delta longitude
            # haversine formula
            d = sin(lat * 0.5) ** 2 + cos(ville1[1]) * cos(ville2[1]) * sin(lng * 0.5) ** 2
            h = 2 * atan2(sqrt(d),sqrt(1-d))
            distance = h * terre
            # NOTE(review): pairs are ordered, so each distance is printed twice
            # (A->B and B->A); e.g. 90 lines for 10 cities -- confirm intent.
            print("La distance entre", ville1[0],"et", ville2[0], "est de", ceil(distance), "km") # 90 couple possible
            # End of part 1
            # Start of part 2
            distances.append([ville1[0], ville2[0], ceil(distance)]) # store each pair with its rounded distance
distances.sort(key=lambda x:x[2]) # sort on element 2, the distance in km
for dist in distances:
    # NOTE(review): dist[2] was already ceiled above, so this ceil is redundant.
    print("La distance entre", dist[0], "et", dist[1], "est de", ceil(dist[2]), "km") # sorted display
| [
"noreply@github.com"
] | noreply@github.com |
d60cd1bfe7525f7f1d1505b330008095c64c52b2 | 5e59252778f8b6465f6e9c4a1890297624cab8f8 | /shell.py | 15b5a123b00f2886e529971c6a178f4639a69ac8 | [] | no_license | tazjel/rpathcmd | fa62dfed77d56ea100c8f76a035486b2761058ee | 0ebffe639f329665824fdd94d8b5c89ce695f153 | refs/heads/master | 2021-01-16T20:03:25.225459 | 2012-11-05T16:09:17 | 2012-11-05T16:09:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,104 | py | #
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2012 James Tanner <tanner.jc@gmail.com>
#
# NOTE: the 'self' variable is an instance of RpathShell
import atexit, logging, os, readline, re, sys
from cmd import Cmd
from pwd import getpwuid
from rpathcmd.utils import *
import pdb
class RpathShell(Cmd):
    """Interactive rpath shell built on cmd.Cmd (Python 2).

    Command implementations (do_*/help_*) are mixed into the class namespace
    by the star-imports over __module_list below.
    """
    __module_list = [ 'api', 'projects', 'groups', 'systems', 'images', 'platforms', 'targets', 'packages']

    # a SyntaxError is thrown if we don't wrap this in an 'exec'
    for module in __module_list:
        exec 'from %s import *' % module

    # maximum length of history file
    HISTORY_LENGTH = 1024

    cmdqueue = []
    completekey = 'tab'
    stdout = sys.stdout
    #prompt_template = 'rpathcmd {SSM:##}> '
    # Any '##' in the template is replaced with the SSM size by postcmd().
    prompt_template = 'rpathcmd> '
    current_line = ''

    # do nothing on an empty line
    emptyline = lambda self: None

    def __init__(self, options):
        # Session/credential state; populated by the login commands.
        self.session = ''
        self.username = ''
        self.server = ''
        self.ssm = {}

        # Initialize the prompt before the first command is read.
        self.postcmd(False, '')

        # make the options available everywhere
        self.options = options
        #pdb.set_trace()

        # Per-user configuration directory (~/.spacecmd), created on first run.
        userinfo = getpwuid(os.getuid())
        self.conf_dir = os.path.join(userinfo[5], '.spacecmd')

        try:
            if not os.path.isdir(self.conf_dir):
                os.mkdir(self.conf_dir, 0700)
        except OSError:
            logging.error('Could not create directory %s' % self.conf_dir)

        self.history_file = os.path.join(self.conf_dir, 'history')

        try:
            # don't split on hyphens or colons during tab completion
            newdelims = readline.get_completer_delims()
            newdelims = re.sub(':|-|/', '', newdelims)
            readline.set_completer_delims(newdelims)

            if not options.nohistory:
                try:
                    if os.path.isfile(self.history_file):
                        readline.read_history_file(self.history_file)

                    readline.set_history_length(self.HISTORY_LENGTH)

                    # always write the history file on exit
                    atexit.register(readline.write_history_file,
                                    self.history_file)
                except IOError:
                    logging.error('Could not read history file')
        except:
            # readline may be unusable (e.g. non-interactive session); ignore.
            pass

    # handle commands that exit the shell
    def precmd(self, line):
        """Pre-process one input line: strip whitespace, handle exit commands
        and csh-style '!' history expansion; return the line to execute."""
        # remove leading/trailing whitespace
        line = re.sub('^\s+|\s+$', '', line)

        # don't do anything on empty lines
        if line == '':
            return ''

        # terminate the shell
        if re.match('quit|exit|eof', line, re.I):
            print
            sys.exit(0)

        # don't attempt to login for some commands
        if re.match('help|login|logout|whoami|history|clear', line, re.I):
            return line

        # login before attempting to run a command
        #if not self.session:
            #pdb.set_trace()
            #self.do_login('')
            #if self.session == '': return ''

        # Split into the command word and the remainder as its arguments.
        parts = line.split()

        if len(parts):
            command = parts[0]
        else:
            return ''

        if len(parts[1:]):
            args = ' '.join(parts[1:])
        else:
            args = ''

        # print the help message if the user passes '--help'
        if re.search('--help', line):
            return 'help %s' % command

        # should we look for an item in the history?
        if command[0] != '!' or len(command) < 2:
            return line

        # remove the '!*' line from the history
        # NOTE(review): remove_last_history_item is not defined in this class;
        # presumably provided by one of the star-imported modules -- confirm.
        self.remove_last_history_item()

        history_match = False

        if command[1] == '!':
            # repeat the last command ('!!')
            line = readline.get_history_item(
                readline.get_current_history_length())
            if line:
                history_match = True
            else:
                logging.warning('%s: event not found' % command)
                return ''

        # attempt to find a numbered history item
        if not history_match:
            try:
                number = int(command[1:])
                line = readline.get_history_item(number)
                if line:
                    history_match = True
                else:
                    raise Exception
            except IndexError:
                pass
            except ValueError:
                pass

        # attempt to match the beginning of the string with a history item
        # (most recent entry wins, hence the reversed range)
        if not history_match:
            history_range = range(1, readline.get_current_history_length())
            history_range.reverse()

            for i in history_range:
                item = readline.get_history_item(i)
                if re.match(command[1:], item):
                    line = item
                    history_match = True
                    break

        # append the arguments to the substituted command
        if history_match:
            line += ' %s' % args

            # Record and echo the expanded line, csh-style.
            readline.add_history(line)
            print line
            return line
        else:
            logging.warning('%s: event not found' % command)
            return ''

    # update the prompt with the SSM size
    # NOTE(review): returns None rather than `stop`; cmd.Cmd treats a falsy
    # return as "keep running", which matches current behavior -- confirm.
    def postcmd(self, stop, line):
        self.prompt = re.sub('##', str(len(self.ssm)), self.prompt_template)
# vim:ts=4:expandtab:
| [
"tanner.jc@gmail.com"
] | tanner.jc@gmail.com |
2241916c7d68776e94af575a2559596e236b1ca4 | 6c298f03496560276fb9f478cbefc218ecd24e9a | /VoiceInput/program/lib/voiceinput.py | 7f661347d3c4a859be5930192ef02c22284a2b7f | [] | no_license | koenschepens/OldPhone | 1f3fccd6018e14e779373243a0e90a759a7425f9 | 5ac9247d0c9e08d6af8fb384479c53b48c174aa6 | refs/heads/master | 2021-01-10T08:31:43.368378 | 2016-03-26T19:06:07 | 2016-03-26T19:06:07 | 43,725,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | import sys
import xbmc, xbmcgui, xbmcaddon
try:
import simplejson
except ImportError:
import json as simplejson
import httplib
__addon__ = xbmcaddon.Addon()
__cwd__ = __addon__.getAddonInfo('path').decode("utf-8")
__language__ = __addon__.getLocalizedString
class InputWindow(xbmcgui.WindowXMLDialog):
    """Kodi XML dialog used as the text-input window for this addon."""

    def __init__( self, *args, **kwargs ):
        self.Kodi14 = False
        # Control-id constants: numeric keys 0-9 and the edit label control.
        self.CTL_NUM_START = 48
        self.CTL_NUM_END = 57
        self.CTL_LABEL_EDIT = 310
        # Current edit text and dialog heading, taken from keyword arguments.
        # NOTE(review): kwargs.get("default") may be None, in which case
        # .decode('utf-8') raises before the `or u""` fallback applies -- confirm.
        self.strEdit = kwargs.get("default").decode('utf-8') or u""
        self.strHeading = kwargs.get("heading") or ""
        self.bIsConfirmed = False
        self.oldPhone = True
        # NOTE(review): LOWER is not defined in this file -- presumably a
        # key-mode constant from another addon module; confirm it is in scope.
        self.keyType = LOWER
        self.words = []
        self.hzcode = ''
        self.pos = 0
        self.num = 0
        xbmcgui.WindowXMLDialog.__init__(self)
        xbmc.log(msg="HEE HALLO@!!", level=xbmc.LOGDEBUG)  # debug trace

    def initControl(self):
        # Place two extra labels directly below the edit control, sized
        # relative to its geometry (90px code area + remaining width list).
        pEdit = self.getControl(self.CTL_LABEL_EDIT)
        px = pEdit.getX()
        py = pEdit.getY()
        pw = pEdit.getWidth()
        ph = pEdit.getHeight()
        self.listw = pw - 95
        self.CTL_HZCODE = xbmcgui.ControlLabel(px, py + ph, 90, 30, '')
        self.CTL_HZLIST = xbmcgui.ControlLabel(px + 95, py + ph, pw - 95, 30, '')
        self.addControl(self.CTL_HZCODE)
        self.addControl(self.CTL_HZLIST)

    def getText(self):
        # NOTE(review): placeholder -- always returns the literal "MONGOL!",
        # not the edited text.
        return "MONGOL!"
class Keyboard:
    """Keyboard-style wrapper that shows InputWindow and exposes its result."""

    def __init__( self, default='', heading='' ):
        self.bIsConfirmed = False
        self.strEdit = default
        self.strHeading = heading

    def doModal (self):
        # Build the dialog from the addon's skin XML and block until closed.
        self.win = InputWindow("DialogKeyboard.xml", __cwd__, heading=self.strHeading, default=self.strEdit )
        self.win.doModal()
        # NOTE(review): isConfirmed is not defined on InputWindow in this file;
        # confirm it exists elsewhere, otherwise this raises AttributeError.
        self.bIsConfirmed = self.win.isConfirmed()
        self.strEdit = self.win.getText()
        del self.win

    def setHeading(self, heading):
        # NOTE(review): the heading argument is ignored and a fixed debug
        # string is stored instead -- confirm this is leftover test code.
        self.strHeading = "WHOWHOWWWWOOOOO"

    def isConfirmed(self):
        return self.bIsConfirmed

    def getText(self):
        # NOTE(review): returns the literal "youtube" rather than strEdit --
        # looks like a hard-coded test value; confirm.
        return "youtube"
"kschepens@gmail.com"
] | kschepens@gmail.com |
bb3553154a2dbaa5f002445d6690063caaacc7ac | 274b4c50375c2cf62cec65805efade97931ccf18 | /bikeshare.py | 56ceefcd06dabd949286ac737cc8b92733dbd86c | [] | no_license | JonJacobs770/pdsnd_github | 65c65a48bf8bf32266823085d2454005422938c7 | 13f728205001c715de8ef33d98266c4e379588f0 | refs/heads/master | 2022-08-21T10:02:14.425315 | 2020-05-31T11:19:00 | 2020-05-31T11:19:00 | 268,144,699 | 0 | 0 | null | 2020-05-30T19:15:45 | 2020-05-30T19:15:44 | null | UTF-8 | Python | false | false | 11,355 | py | import time
import pandas as pd
import numpy as np
import math
CITY_DATA = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Loops on each question until a valid answer is given.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    # The city question is printed here; the input() below is deliberately bare.
    print('Hello! Let\'s explore some US bikeshare data!\nWhich of the following cities would you like to see more information about Chicago, New York, or Washington?\n')
    # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
    while True:
        city_chosen = input().lower()
        # NOTE(review): city_chosen is already lower-cased; the extra .lower()
        # calls below are redundant but harmless.
        if city_chosen.lower() not in CITY_DATA.keys():
            print("That is not one of the available cities. Please select Chicago, New York, or Washington.")
            continue
        else:
            print('Nice choice! We\'ll use %s.' % city_chosen.lower().title())
            break
    # TO DO: get user input for month (all, january, february, ... , june)
    while True:
        month_chosen = input("\n In which month would you like to see data for? January, February, March, April, May, June. Type 'all' if you do not have any preference?\n").lower()
        if month_chosen not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'):
            print("It seems you have either not entered the month's full name or you have entered a month on a different planet. Please, try again.")
            continue
        else:
            print('Ok then! We\'ll use %s.' % month_chosen.lower().title())
            break
    # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
    while True:
        day_chosen = input("\nAre you looking for a particular day? If so, kindly enter the day as follows: Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or type 'all' if you do not have any preference.\n").lower()
        if day_chosen not in ('sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'all'):
            print("It seems you have not entered a valid day of the week. kindly enter the day as follows: Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or type 'all' if you do not have any preference.\n")
            continue
        else:
            print('Sounds good! We\'ll use %s.' % day_chosen.lower().title())
            break
    print('-'*40)
    return city_chosen, month_chosen, day_chosen
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    frame = pd.read_csv(CITY_DATA[city])

    # Derive month number, weekday name and start hour from the parsed start time.
    frame['Start Time'] = pd.to_datetime(frame['Start Time'])
    frame['month'] = frame['Start Time'].dt.month
    frame['day_of_week'] = frame['Start Time'].dt.day_name()
    frame['hour'] = frame['Start Time'].dt.hour

    if month != 'all':
        # Translate the month name into its 1-based number before comparing.
        month_number = 1 + ['january', 'february', 'march', 'april', 'may', 'june'].index(month)
        frame = frame[frame['month'] == month_number]

    if day != 'all':
        frame = frame[frame['day_of_week'] == day.title()]

    return frame
def time_stats(df):
    """Displays statistics on the most frequent times of travel."""
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()

    # Month numbers are 1-based, so index the name list with (mode - 1).
    month_names = ['january','february','march','april','may','june','all']
    print('Most popular month:', month_names[df['month'].mode()[0] - 1].title())

    # Most frequent weekday name, as produced by load_data.
    print('Most popular day:', df['day_of_week'].mode()[0])

    # Most frequent start hour (0-23).
    print('Most common hour:', df['hour'].mode()[0])

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nDetermining the most Popular Stations and Trip...\n')
    start_time = time.time()

    # value_counts() sorts descending, so idxmax() gives the most frequent label.
    print('Most Commonly used start station:', df['Start Station'].value_counts().idxmax())
    print('\nMost Commonly used end station:', df['End Station'].value_counts().idxmax())

    # Count each (start, end) pair and keep only the single largest one.
    top_pair = df.groupby(['Start Station', 'End Station']).size().nlargest(1)
    print("\nThe most frequent trip from start to end is:\n{}".format(top_pair))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def round_up(n, decimals=0):
    """Round *n* upward (toward +infinity) to the given number of decimal places."""
    scale = 10 ** decimals
    shifted = math.ceil(n * scale)
    return shifted / scale
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()

    # 'Trip Duration' is recorded in seconds; report the grand total in days.
    total_seconds = sum(df['Trip Duration'])
    print('Total travel time:', round_up((total_seconds/86400),2), " Days")

    # ...and the mean trip length in minutes.
    mean_seconds = df['Trip Duration'].mean()
    print('Mean travel time:', round_up((mean_seconds/60),2), " Minutes")

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def user_stats(df):
    """Displays statistics on bikeshare users."""
    print('\nCalculating User Stats...\n')
    start_time = time.time()

    # Every city file has a 'User Type' column.
    print('User Types:\n', df['User Type'].value_counts())

    # Washington's file lacks 'Gender' and 'Birth Year', so each lookup below
    # is guarded: the KeyError is raised while evaluating the print arguments
    # and the fallback message is printed instead.
    try:
        print('\nGender Types:\n', df['Gender'].value_counts())
    except KeyError:
        print("\nGender Types:\nThere does not seem to be data available to display information about genders for this city.")

    try:
        print('\nThe oldest person using the system was born in:', int(df['Birth Year'].min()))
    except KeyError:
        print("\nThe oldest person using the system was born in:\nThere does not seem to be data available to determine the oldest person for this city.")

    try:
        print('\nThe youngest person using the systen was born in:', int(df['Birth Year'].max()))
    except KeyError:
        print("\nThe youngest person using the systen was born in:\nThere does not seem to be data available to determine the youngest person for this city.")

    try:
        print('\nMost common year that people using the system were born in:', int(df['Birth Year'].value_counts().idxmax()))
    except KeyError:
        print("\nMost common year that people using the system were born in:\nThere does not seem to be data available to determine the oldest person for this city.")

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def display_data(df):
    """
    Display individual trip data

    Shows the filtered DataFrame five rows at a time, advancing the window
    for as long as the user keeps answering 'yes'.

    Args:
        bikeshare dataframe.
    Returns:
        None.
    """
    start = 0
    end = 5
    choice = ''
    while choice.lower() not in ['yes', 'no']:
        # NOTE(review): 'indiviual' is a typo in the user-facing prompt.
        choice = input('Do you want to view indiviual trip data? Enter \'Yes\' or \'No\'.\n')
        if choice.lower() not in ['yes', 'no']:
            print('There are only two options \'Yes\' or \'No\'. Please try again.\n')
        elif choice.lower() == "yes":
            print(df.iloc[start:end])
            while True:
                second_choice = input('\nDo you want to view more trip data? Enter \'Yes\' or \'No\'.\n')
                if second_choice.lower() not in ['yes', 'no']:
                    print('Maybe you made a typo. Please try again.\n')
                elif second_choice.lower() == "yes":
                    # Advance the five-row window and show the next slice.
                    start += 5
                    end += 5
                    print(df.iloc[start:end])
                elif second_choice == "no":
                    # NOTE(review): this compares the raw input, so 'No'/'NO'
                    # fall through and re-prompt instead of exiting -- confirm.
                    return
        elif choice.lower() == "no":
            return
    return
def restart():
    """Ask whether to run another search; on 'yes', re-enter main()."""
    answer = input('\nWould you like to restart your search? Enter \'Yes\' or \'No\'.\n')
    if answer.lower() == 'yes':
        # Restart by calling main() again (recursive rather than loop-based).
        main()
    return
def main():
    """Interactive driver: gather filters, confirm them, then print all stats."""
    while True:
        # Prompt for city/month/day filters (defined elsewhere in this file).
        city, month, day = get_filters()
        print('Fetching some insightful data from {} for you...'.format(city).title())
        df = load_data(city, month, day)
        confirm_choice = ''
        # Confirmation loop: the wording depends on which filters are 'all'.
        while confirm_choice.lower() not in ['yes', 'no']:
            if month != 'all' and day != 'all':
                confirm_choice = input('Just to confirm you would like to see data for {} in {} on a {}. Type \'Yes\' or \'No\'.\n'.format(city.title(),month.title(),day.title()))
            elif month == 'all' and day == 'all':
                confirm_choice = input('Just to confirm you would like to see \'all\' data for {}. Type \'Yes\' or \'No\'.\n'.format(city.title()))
            elif month == 'all':
                # month == 'all' but a specific day was chosen.
                confirm_choice = input('Just to confirm you would like to see data for {} for {} months on a {} . Type \'Yes\' or \'No\'.\n'.format(city.title(),month.title(),day.title()))
            else:
                # Specific month, day == 'all'.
                confirm_choice = input('Just to confirm you would like to see data for {} in {} for {} days. Type \'Yes\' or \'No\'.\n'.format(city.title(),month.title(),day.title()))
            if confirm_choice.lower() not in ['yes', 'no']:
                print('Maybe you made a typo. Please try again\n')
            elif confirm_choice.lower() == "yes":
                break
            elif confirm_choice.lower() == "no":
                # NOTE(review): when restart() returns (user declines the
                # restart), control falls through and the stats below still
                # run on the unconfirmed df -- confirm this is intended.
                restart()
        # Print every statistics section, then offer per-trip data.
        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        display_data(df)
        restart()
        break
if __name__ == "__main__":
    # Entry point: run the interactive bikeshare explorer.
    main()
| [
"jonty.jacobs@gmail.com"
] | jonty.jacobs@gmail.com |
c0c53574fe0228d93d1f83423937147a06d97cef | e177125a896527f0917852db687d8837f41e0ceb | /topoy/either.py | 3b40fc10436242d3c0626a9eaeebf54d11c63df1 | [] | no_license | beezee/topoy | f73fa10eb850ad781c1c507516ced6da19be739d | c56c6627a4430456f1034f4d1b0830c5a654ee52 | refs/heads/master | 2022-07-31T14:29:38.100374 | 2019-11-17T16:59:48 | 2019-11-17T16:59:48 | 219,234,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | py | from topoy.applicative import Applicative
from topoy.apply import Apply, tuple
from topoy.monad import Monad
from topoy.hkt import HKT
from topoy.functor import Functor
from topoy.traverse import Traverse
from topoy.typevars import *
from topoy.semigroup import KeepLeft, Semigroup
from topoy.sum import append2sg, bind2, F1, F2, fold2, map2, Sum2
from typing import Callable, cast, Generic, Tuple
class EitherF(Generic[B]):
    """Type-level tag used to treat Either as an HKT; no runtime behaviour."""
    pass
class Either(HKT[EitherF[B], A]):
    """Right-biased sum type (Left = B, Right = A) wrapping a Sum2 value."""
    @staticmethod
    def inj(e: 'Either[B, A]') -> 'HKT[EitherF[B], A]':
        """Upcast an Either to its HKT encoding (identity at runtime)."""
        return cast(HKT[EitherF[B], A], e)
    @staticmethod
    def proj(hkt: 'HKT[EitherF[B], A]') -> 'Either[B, A]':
        """Downcast an HKT encoding back to Either (identity at runtime)."""
        return cast('Either[B, A]', hkt)
    def __init__(self, run: Sum2[B, A]) -> None:
        # run holds the underlying tagged value: F1 = left, F2 = right.
        self.run = run
    def left_map(self, f: Callable[[B], C]) -> 'Either[C, A]':
        """Map over the left value, leaving a right untouched."""
        return fold2[B, A, 'Either[C, A]']((
            lambda l: Either(F1(f(l))),
            lambda r: Either(F2(r))))(self.run)
    def map(self, f: Callable[[A], C]) -> 'Either[B, C]':
        """Map over the right value, leaving a left untouched."""
        return fold2[B, A, 'Either[B, C]']((
            lambda l: Either(F1(l)),
            lambda r: Either(F2(f(r)))))(self.run)
    def bimap(self, fl: Callable[[B], C], fr: Callable[[A], D]) -> 'Either[C, D]':
        """Map both sides at once."""
        return self.map(fr).left_map(fl)
    def fold(self, fl: Callable[[B], C], fr: Callable[[A], C]) -> C:
        """Collapse to a single value by handling both sides."""
        return fold2((fl, fr))(self.run)
    def bind(self, afb: Callable[[A], 'Either[B, C]']) -> 'Either[B, C]':
        """Monadic bind: feed the right value to afb, short-circuit on left."""
        return fold2[B, A, 'Either[B, C]']((
            lambda l: Either(F1(l)),
            lambda r: afb(r)))(self.run)
    def ap(self, fab: 'Either[B, Callable[[A], C]]',
           sg: Semigroup[B] = KeepLeft[B]()) -> 'Either[B, C]':
        """Applicative apply; lefts are combined with the given Semigroup."""
        return Either(append2sg(self.run, fab.run, sg)).map(lambda x: x[1](x[0]))
    def tuple(self, fb: 'Either[B, C]') -> 'Either[B, Tuple[A, C]]':
        """Pair two rights together (shadows builtin tuple deliberately;
        the call below is topoy.apply.tuple, imported at module top)."""
        return Either.proj(
            tuple(EitherApplicative(), self, fb))
    def traverse(self,
                 ap: Applicative[G],
                 f: Callable[[A], HKT[G, C]]) -> HKT[G, 'Either[B, C]']:
        """Traverse the right value with f under the applicative ap."""
        return fold2[B, A, HKT[G, 'Either[B, C]']]((
            lambda l: ap.pure(LeftOf[C].put(l)),
            lambda r: ap.map(f(r), lambda x: RightOf[B].put(x))))(self.run)
    def __str__(self) -> str:
        return fold2[B, A, str]((
            lambda l: 'Left(' + str(l) + ')',
            lambda r: 'Right(' + str(r) + ')'))(self.run)
class LeftOf(Generic[A]):
    """Constructor helper for left-valued Eithers."""
    @classmethod
    def put(cls, b: B) -> Either[B, A]:
        """Wrap *b* as the left branch of an Either."""
        wrapped = F1(b)
        return Either[B, A](wrapped)
class RightOf(Generic[A]):
    """Constructor helper for right-valued Eithers."""
    @classmethod
    def put(cls, b: B) -> Either[A, B]:
        """Wrap *b* as the right branch of an Either."""
        wrapped = F2(b)
        return Either[A, B](wrapped)
class EitherFunctor(Generic[C], Functor[EitherF[C]]):
    """Functor instance for Either with a fixed left type C."""
    def map(self, fa: HKT[EitherF[C], A],
            f: Callable[[A], B]) -> HKT[EitherF[C], B]:
        """Apply f to the right value of fa, if present."""
        either = Either.proj(fa)
        return either.map(f)
class EitherMonad(Generic[C], EitherFunctor[C], Monad[EitherF[C]]):
    """Monad instance for Either with a fixed left type C."""
    def point(self, a: A) -> HKT[EitherF[C], A]:
        """Lift a plain value into the right side."""
        return RightOf[C].put(a)
    def bind(self, fa: HKT[EitherF[C], A],
             f: Callable[[A], HKT[EitherF[C], B]]) -> HKT[EitherF[C], B]:
        """Sequence fa into f, short-circuiting on a left value."""
        def run_step(a: A) -> Either[C, B]:
            return Either.proj(f(a))
        return Either.proj(fa).bind(run_step)
class EitherApply(Generic[C], Apply[EitherF[C]], EitherFunctor[C]):
    """Apply instance; left values are merged with the supplied Semigroup."""
    def __init__(self, sg: Semigroup[C] = KeepLeft[C]()) -> None:
        # The default Semigroup instance is created once at definition time
        # and shared across EitherApply objects; KeepLeft holds no state, so
        # sharing it is harmless.
        self._sg = sg
    def ap(self, fa: HKT[EitherF[C], A],
           fab: HKT[EitherF[C], Callable[[A], B]]) -> HKT[EitherF[C], B]:
        """Apply a wrapped function to a wrapped value."""
        value = Either.proj(fa)
        func = Either.proj(fab)
        return value.ap(func, self._sg)
class EitherApplicative(Generic[C],
                        Applicative[EitherF[C]],
                        EitherApply[C]):
    """Applicative instance for Either with a fixed left type C."""
    def pure(self, a: A) -> HKT[EitherF[C], A]:
        """Wrap a plain value as a right."""
        return RightOf[C].put(a)
class EitherTraverse(Generic[C], Traverse[EitherF[C]], EitherFunctor[C]):
    """Traverse instance for Either with a fixed left type C."""
    def traverse(self,
                 ap: Applicative[G], fa: HKT[EitherF[C], A],
                 f: Callable[[A], HKT[G, B]]) -> HKT[G, HKT[EitherF[C], B]]:
        """Traverse the right value with f under the applicative ap."""
        traversed = Either.proj(fa).traverse(ap, f)
        return ap.map(traversed, Either.inj)
| [
"brian.zeligson@gmail.com"
] | brian.zeligson@gmail.com |
dd16775a4926161b4b8d7e6769c6edfd9685d2c3 | b68af7ed59f8cb357abb45cc01c4c90e69d0dac4 | /conftest.py | fd2c2f101438991b85a765037d2097de4b849720 | [
"MIT"
] | permissive | paultro708/DataReduction | fb4197c889f47fb35cd89812c76c3bdde7badf17 | ef63b74f3c93e7eb7887c8bc2f25ce0200460d3d | refs/heads/master | 2023-02-02T06:48:42.691450 | 2020-12-18T11:16:17 | 2020-12-18T11:16:17 | 287,581,390 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import pytest
from tests.const import names, basic_reduction, classifiers
from InstanceReduction.DataPreparation import DataPreparation
from InstanceReduction.Raport import Raport
@pytest.fixture(params=names, scope = 'module')
def data(request):
    """Module-scoped DataPreparation, parametrized over every dataset name."""
    dataset_name = request.param
    return DataPreparation(dataset_name)
@pytest.fixture(params = basic_reduction, scope = 'module')
def reduction_alg(request, data):
    """Each basic reduction algorithm, instantiated on the `data` fixture."""
    algorithm_cls = request.param
    return algorithm_cls(data)
@pytest.fixture(params = basic_reduction, scope = 'module')
def reduction_alg_names(request):
    """The reduction algorithm classes themselves, one per parametrization."""
    algorithm_cls = request.param
    return algorithm_cls
@pytest.fixture(scope = 'module')
def data_prepar_iris(request):
    """Module-scoped DataPreparation for the iris dataset."""
    iris = DataPreparation('iris')
    return iris
@pytest.fixture(params = basic_reduction, scope = 'module')
def reduction_alg_iris(request):
    """Each reduction algorithm, instantiated on a fresh iris preparation."""
    algorithm_cls = request.param
    iris_data = DataPreparation('iris')
    return algorithm_cls(iris_data)
@pytest.fixture(scope='module')
def raport_iris(reduction_alg_iris, data_prepar_iris):
    """A Raport built from iris data after running the reduction algorithm."""
    algorithm = reduction_alg_iris
    algorithm.reduce_instances()
    return Raport(data_prepar_iris, algorithm.red_data, algorithm.red_lab)
| [
"32535575+paultro708@users.noreply.github.com"
] | 32535575+paultro708@users.noreply.github.com |
2159307633408b2e9ff64b9bb7270d5a919c593b | ad5e9d790c564cdf6923670bbf8454f1be88624a | /projects_robotics/cfg/dynamic_ric.cfg | 63113e0c4d6189cce67406622d5ceb4541eb0c45 | [] | no_license | zhangxuelei86/ROS-odometry-car | 677cdaeba5c2aaea014a115c0bfd09f85f32c9e8 | 284d0581dcf5e6a68d164b2e79a93976aa677a3f | refs/heads/master | 2022-04-10T09:30:25.096406 | 2020-03-25T11:12:05 | 2020-03-25T11:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | cfg | #!/usr/bin/env python
PACKAGE = "projects_robotics"
from dynamic_reconfigure.parameter_generator_catkin import *
# ROS dynamic_reconfigure generator: parameter order here defines the order
# in the generated config, so do not reorder the gen.add calls.
gen = ParameterGenerator()
# Odometry model selector: 0 = differential drive, 1 = Ackermann.
gen.add("computation_type", int_t, 0, "0 Differential Drive, 1 Ackermann", 0, 0, 1)
# Toggle (0/1) watched by the node; flipping it triggers a pose reset.
gen.add("reset_signal", int_t, 0, "change to reset", 0, 0, 1)
# Toggle (0/1); flipping it applies the x/y coordinates below.
gen.add("change_coordinates_signal", int_t, 0, "switch to change coordinates", 0,0,1)
gen.add("x_coordinate", double_t, 0, "New X", 0)
gen.add("y_coordinate", double_t, 0, "New Y", 0)
# exit() with the generated code is the standard .cfg idiom.
exit(gen.generate(PACKAGE, "projects_robotics", "dynamic_ric"))
"alessia.paccagnella@mail.polimi.it"
] | alessia.paccagnella@mail.polimi.it |
1419006fc8c21bcce2139ded545ad3d7af085e95 | 97379f2f2ab5ffa58ad0bbfbb5a2b1b2bc46d6fe | /modelwrangler/corral/dense_feedforward.py | 22e65f0cfad2e415643b656689bb4bfc3c1256fd | [
"MIT"
] | permissive | brenton-enigma/modelwrangler | 6afffbdf0b929e566adfd4497b60f8c613ef57d5 | 541d3c3267f70ff57a30a8c954b82c039ecff7aa | refs/heads/master | 2021-05-09T20:16:29.687571 | 2017-12-17T18:33:45 | 2017-12-17T18:33:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | """Module sets up Dense Autoencoder model"""
import tensorflow as tf
from modelwrangler.model_wrangler import ModelWrangler
import modelwrangler.tf_ops as tops
from modelwrangler.tf_models import BaseNetworkParams, BaseNetwork, LayerConfig
class DenseFeedforwardParams(BaseNetworkParams):
    """Parameter container for the dense feedforward network.
    """
    # Sub-dicts below are parsed into LayerConfig objects by the base class.
    LAYER_PARAM_TYPES = {
        "hidden_params": LayerConfig,
        "output_params": LayerConfig,
    }
    # Default hyperparameters; overridable via kwargs at construction.
    MODEL_SPECIFIC_ATTRIBUTES = {
        "name": "ff",
        "in_size": 10,
        "out_size": 2,
        # One entry per hidden layer, giving its width.
        "hidden_nodes": [5, 5],
        "hidden_params": {
            "dropout_rate": 0.1
        },
        # Output layer: no dropout, no activation, no activity regularizer.
        "output_params": {
            "dropout_rate": None,
            "activation": None,
            "act_reg": None
        },
    }
class DenseFeedforwardModel(BaseNetwork):
    """Dense feedforward network: input -> N dense hidden layers -> output.
    """

    # pylint: disable=too-many-instance-attributes

    PARAM_CLASS = DenseFeedforwardParams

    def setup_layers(self, params):
        """Assemble the input placeholder, hidden stack, output layer,
        target placeholder, and a loss matched to the output activation.

        Returns:
            (in_layer, out_layer, target_layer, loss) tensors.
        """

        in_layer = tf.placeholder(
            "float",
            name="input",
            shape=[None, params.in_size]
        )

        # Thread the signal through each hidden dense layer in turn.
        current = in_layer
        for idx, num_nodes in enumerate(params.hidden_nodes):
            current = self.make_dense_layer(
                current,
                num_nodes,
                'hidden_{}'.format(idx),
                params.hidden_params
            )

        # The output layer exposes both pre-activation (for the CE losses)
        # and post-activation tensors.
        preact_out_layer, out_layer = self.make_dense_output_layer(
            current,
            params.out_size,
            params.output_params
        )

        target_layer = tf.placeholder(
            "float",
            name="target",
            shape=[None, params.out_size]
        )

        # Pick a numerically stable loss for the configured activation.
        activation = params.output_params.activation
        if activation in ['sigmoid']:
            loss = tops.loss_sigmoid_ce(preact_out_layer, target_layer)
        elif activation in ['softmax']:
            loss = tops.loss_softmax_ce(preact_out_layer, target_layer)
        else:
            loss = tops.loss_mse(target_layer, out_layer)

        return in_layer, out_layer, target_layer, loss
class DenseFeedforward(ModelWrangler):
    """Convenience wrapper wiring DenseFeedforwardModel into ModelWrangler.
    """
    def __init__(self, in_size=10, **kwargs):
        # Forward everything to ModelWrangler with the model class fixed.
        super(DenseFeedforward, self).__init__(
            model_class=DenseFeedforwardModel,
            in_size=in_size,
            **kwargs)
| [
"bmcmenamin@gmail.com"
] | bmcmenamin@gmail.com |
676f594537bc9c7e4d4a487af70a88783494133b | 843cda9d64985676524db33395d8f4439f0cdf50 | /reviews/migrations/0002_auto_20210109_2143.py | 83750986d64ef284cc3a178ca0bfd0128e6f103c | [] | no_license | orangeberry/airbnb-clone | 7bc37f1e0b4af142edf88c38ca84db71a98a9fca | 6d9ecee9a3190f8cee3ae3fcd416261f633ab581 | refs/heads/master | 2023-03-04T11:31:39.685227 | 2021-02-16T16:39:16 | 2021-02-16T16:39:16 | 324,152,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # Generated by Django 3.1.4 on 2021-01-09 12:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds related_name='reviews' to the Review
    # model's room and user foreign keys.
    dependencies = [
        ('rooms', '0008_auto_20210109_2143'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('reviews', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='review',
            name='room',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='rooms.room'),
        ),
        migrations.AlterField(
            model_name='review',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"orangeberry@kakao.com"
] | orangeberry@kakao.com |
841b47896afe9f1263de4f58ffd78c05f3001e4a | a78ddbca5c691acc739cfb65d5914fcf27bf06cb | /DataFilter.py | 04facf6863c268b8ae4239019bc6dc52e8d41c76 | [] | no_license | yangshuoc/pku_SVT_plus | 4620355bace7fdd2ea96f18255698ac1f0f98dea | fd67d945526631c821a092fb0585a801dc94d7f4 | refs/heads/master | 2020-03-24T17:20:51.686792 | 2018-08-02T01:20:59 | 2018-08-02T01:20:59 | 142,855,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,889 | py | import csv
import random
# DATA_FILE = 'data.csv'
# MISS_TAG = 0
# MISSING_ROW_NUM = 900
# MISSING_COL_NUM = 6
DATA_FILE = 'vocab_vector.csv'
MISS_TAG = 0
MISSING_ROW_NUM = 200
# MISSING_COL_NUM = 70
MISSING_COL_NUM = 50
# DATA_FILE = 'svt_matrix.csv'
# MISS_TAG = 0
# MISSING_ROW_NUM = 70
# MISSING_COL_NUM = 250
def loadCSVData(file):
    """Load a numeric CSV file into a matrix of floats.

    The first row is assumed to be a header and is skipped.

    Args:
        file: path to a utf-8 encoded CSV file whose data cells all parse
            as floats.

    Returns:
        A list of rows, each a list of floats (empty list for an empty file).
    """
    # Use a context manager so the handle is closed; the previous version
    # leaked the file object returned by open().
    with open(file, encoding='utf-8') as f:
        rows = list(csv.reader(f))
    # rows[1:] drops the header; convert every remaining cell to float.
    return [[float(cell) for cell in row] for row in rows[1:]]
def buildTestMatrix(matrix):
    """Blank out a random block of the matrix in place.

    Randomly keeps all but MISSING_ROW_NUM rows and MISSING_COL_NUM columns;
    every cell lying in both a dropped row and a dropped column is replaced
    with MISS_TAG.

    Returns:
        (kept_rows, kept_cols, matrix) -- sorted index lists and the
        mutated matrix.
    """
    n_rows = len(matrix)
    n_cols = len(matrix[0])
    # Sampling order matters for RNG reproducibility: rows first, then cols.
    kept_rows = random.sample(range(n_rows), n_rows - MISSING_ROW_NUM)
    kept_cols = random.sample(range(n_cols), n_cols - MISSING_COL_NUM)
    kept_rows.sort()
    kept_cols.sort()
    for i in range(n_rows):
        for j in range(n_cols):
            if i not in kept_rows and j not in kept_cols:
                matrix[i][j] = MISS_TAG
    return kept_rows, kept_cols, matrix
def getCsrMatrix(M, row, col):
    """Collect the non-missing entries of M in CSR-style parallel lists.

    Appends the row/column index of every cell != MISS_TAG to the
    caller-supplied `row` and `col` lists (mutated in place).

    Returns:
        (values, n, m) -- the matching cell values plus matrix dimensions.
    """
    n_rows = len(M)
    n_cols = len(M[0])
    values = []
    for i in range(n_rows):
        for j in range(n_cols):
            cell = M[i][j]
            if cell == MISS_TAG:
                continue
            row.append(i)
            col.append(j)
            values.append(cell)
    return values, n_rows, n_cols
if __name__ == '__main__':
    # Load the configured CSV, punch random holes in it, then extract the
    # surviving entries in CSR form and print the damaged matrix.
    matrix = loadCSVData(DATA_FILE)
    selectedN,selectedM,matrix = buildTestMatrix(matrix)
    row = []
    col = []
    # NOTE(review): getCsrMatrix returns a (values, n, m) tuple, so 'vector'
    # here is that 3-tuple rather than just the value list -- confirm intent.
    vector = getCsrMatrix(matrix,row,col)
    # print(vector)
    # NOTE(review): 'row' is reused as the loop variable here, clobbering the
    # CSR row-index list built above.
    for row in matrix:
        print(row)
print(row)
| [
"noreply@github.com"
] | noreply@github.com |
6afef4ba6551705e3a2732735b93faeda61ffeb7 | 63d302d31105ed9ce059d12d8d13b48c633e58a3 | /part03-e13_read_series/src/read_series.py | 16f604eab227a190338497ad9349a6497d0d7493 | [] | no_license | doyu/hy-data-analysis-with-python-summer-2021 | 55ccc3a089d3865bd1ae89b92c9e1784c44e1935 | 1a34983d2c3b9a20473d16209ba8a74f9d68daf2 | refs/heads/main | 2023-08-22T02:14:05.785821 | 2021-09-29T11:41:59 | 2021-09-29T11:41:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | #!/usr/bin/env python3
import pandas as pd
def read_series():
    """Read "index value" pairs from stdin until a blank line; return a Series.

    Each line must contain at least two whitespace-separated tokens: the
    first becomes the index label, the second the value (kept as a string).
    Malformed lines print "Error" and are skipped.

    Returns:
        pd.Series with object dtype (string values).
    """
    value_list = []
    index_list = []
    line = input()
    while line != "":
        try:
            tokens = line.split()
            # Look up the value first so a short line skips both appends.
            value = tokens[1]
            index = tokens[0]
            index_list.append(index)
            value_list.append(value)
        except IndexError:
            # Line had fewer than two tokens; report and skip it.
            print("Error")
        line = input()
    # Series.append was removed in pandas 2.0; concatenating with an empty
    # object-dtype Series reproduces the original return dtype.
    serie = pd.Series([], dtype='object')
    serie2 = pd.Series(value_list, index=index_list)
    return pd.concat([serie, serie2])
def main():
    # Read a Series from stdin and echo it.
    print(read_series())
if __name__ == "__main__":
    main()
| [
"hiroshi.doyu@gmail.com"
] | hiroshi.doyu@gmail.com |
967a7bd4f120ca55ba37aef4cb2e8af1f8b53ff8 | 393eb8b5e87de5572e4bd31902c9a42edf91e2f2 | /mysite/home/models.py | 6409d9c97f4d90e5c82308b3d52d593562195328 | [] | no_license | heyyysus/Yeetboard | ea185160e89cd343d360981f96204f44a4eb7b18 | ecd0aaa416a03028b973619b3e4eeb42ea04cf6e | refs/heads/master | 2021-09-29T00:55:38.598684 | 2018-11-22T01:21:35 | 2018-11-22T01:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, User
)
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
    # Per-user profile row, linked 1:1 to the auth User and created
    # automatically by the post_save receivers defined below in this module.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.TextField(max_length=500, blank=True)
    email = models.TextField(max_length=50, blank=True)
    # Set once the user completes email activation (see EmailVerification).
    isActivated = models.BooleanField(default=False)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create the companion Profile row when a User is first saved."""
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Persist the related Profile every time the User itself is saved.
    instance.profile.save()
class EmailVerification(models.Model):
    # Tracks emailed activation codes per username (stored by name, not FK).
    # Fix: removed the stray trailing semicolon and normalized kwarg spacing
    # to PEP 8; field definitions are semantically unchanged.
    user = models.CharField(max_length=150, default="")
    activation_code = models.CharField(max_length=50)
    redeemed = models.BooleanField(default=False)
    # When the code was last emailed; null until first send.
    lastsent = models.DateTimeField(null=True)
class Post(models.Model):
    # A board post; 'post_id' is a short public identifier distinct from the
    # database primary key.
    author = models.CharField(max_length = 32, blank=False)
    title = models.CharField(max_length = 200, blank=False)
    content = models.TextField(max_length = 5000, blank=True)
    timestamp = models.DateTimeField(null=False)
    isNsfw = models.BooleanField(default=False)
    isSpoiler = models.BooleanField(default=False)
    post_id = models.CharField(max_length=7, blank=False)
    def as_dict(self):
        # Serialize to a plain dict (exposes both the db id and the public
        # post_id); timestamp is returned as-is, not stringified.
        return {
            "id": self.id,
            "author": self.author,
            "title": self.title,
            "content": self.content,
            "timestamp": self.timestamp,
            "isNsfw": self.isNsfw,
            "isSpoiler": self.isSpoiler,
            "post_id": self.post_id
        }
class UserActions(models.Model):
    # Audit log of user actions; anonymous visitors are logged as "<GUEST>".
    user = models.CharField(max_length = 32, blank=False, default="<GUEST>")
    action = models.CharField(max_length = 256, blank=False)
    timestamp = models.DateTimeField(null=False)
    # Dotted-quad client address (max_length 16 fits IPv4 only).
    ipv4 = models.CharField(max_length = 16, blank=False)
| [
"jesus.velarde07@gmail.com"
] | jesus.velarde07@gmail.com |
e0804b03b742211cf22e225595431d99051e0976 | a6ab576fcdb7f7258bf579765c92a664530b6574 | /Exceptions.py | deb9da8256dbc666dff8ca553ac66a4d872c7199 | [] | no_license | ILYSHI/Python-code | 36a7322be62dcda1c3e7949f23fed927657d40fa | ff110688e32be6f91a0fce5d38c2775a062c1225 | refs/heads/main | 2023-01-19T03:55:38.041634 | 2020-11-21T12:45:04 | 2020-11-21T12:45:04 | 314,808,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | class BadName(Exception):
pass
def greet(name):
    """Return a greeting for *name*.

    Args:
        name: the name to greet; must be non-empty and start with an
            uppercase letter.

    Returns:
        The string "Hello, <name>".

    Raises:
        BadName: if *name* is empty or does not start with uppercase.
    """
    # Fix: an empty name used to crash with IndexError on name[0]; treat it
    # as an inappropriate name instead, consistent with the lowercase case.
    if name and name[0].isupper():
        return "Hello, " + name
    else:
        raise BadName(name + ' is inappropriate name')
print('Import is execution') | [
"termit63@gmail.com"
] | termit63@gmail.com |
ac215caf0b42fede5612998b3aad73bcf7068688 | 468daac37b861ce6e9f7e18f491754ba0acd9818 | /TTTBoard.py | 236ff071bf3cb9086d343156dc5b8c689841942a | [] | no_license | charlesdaniel/Bioloid_TicTacToe | 49ec570bdf5704abf394d0cb567bd6b3c8257b18 | 37b1e45135627fb6513dd5cd3b440566bfd08952 | refs/heads/master | 2016-08-05T14:27:32.220491 | 2011-09-05T13:59:09 | 2011-09-05T14:01:35 | 2,328,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,480 | py | ## Import the base TTTPlayer class (we'll need it later to prompt for input)
from TTTPlayer import *
## The TTTBoard is the engine that drives the game. It handles prompting for input from
## any TTTPlayer class (this includes TTTPlayerAI since that inherits from TTTPlayer).
## Additionally this class prints out the board to the screen, checks for any winnings,
## and tells the winner if they won.
class TTTBoard():
def __init__(self, player0, player1):
self.players = [player0, player1]
# The Board is a 1 dimensional array layed out like so
# 0 | 1 | 2
# -----------
# 3 | 4 | 5
# -----------
# 6 | 7 | 8
#
# The values in the cells are ' ' or a player's index from the players array (ie. "0" or "1")
self.board = []
def resetBoard(self):
# This method resets the board values to a space character (no piece) in each cell
self.board = [ ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
def checkWin(self):
## This method scans through all the possible winning combinations of cells and sees if the values
## in those cells are the same (and not " " empty). If it finds a combination then it returns the
## combination back. Otherwise it returns the value None.
# These are all the winning cell combinations in Tic-Tac-Toe
winningCombinations = [ [0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8], [0,4,8], [2,4,6] ]
# Run through those winning combinations looking for matching values in each cell in each combination
# Also make sure the cells actually have a value other than ' '
for c in winningCombinations:
if((self.board[c[0]] != ' ') and
(self.board[c[0]] == self.board[c[1]]) and
(self.board[c[1]] == self.board[c[2]])):
return c # Found a combination which has all the same values, win
return None # Didn't find any combinations that won
def printBoard(self):
## This method prints out the nice board
print " %s | %s | %s " % (self.board[0], self.board[1], self.board[2])
print "-----------"
print " %s | %s | %s " % (self.board[3], self.board[4], self.board[5])
print "-----------"
print " %s | %s | %s " % (self.board[6], self.board[7], self.board[8])
def playGame(self):
## This method is the main engine of a (one) game (the driver).
# Clear the board of pieces
self.resetBoard()
winningMove = None # This will hold the winning combination
numMoves = 0
p = 0 # The index of the current player in the self.players array
# Main Loop: Prompt for move and check winnings until there's a winner or 8 moves have been made
while ((numMoves < 9) and (winningMove == None)):
# Print the board for the user
self.printBoard()
# Ask the current player for a move via the TTTPlayer/TTTPlayerAI getMove() method
m = self.players[p].getMove(self.board)
# Check to see if that cell is empty or not
if(self.board[m] == ' '):
# Place the piece (value of p either 0 or 1) on that cell
self.board[m] = p
# Tells the current player the piece has been placed successfully
# (this is so the TTTPlayer/TTTPlayerAI can move the robot arms to put
# the marking in that cell.
self.players[p].placePiece(m)
# Increment the count of moves taken
numMoves = numMoves + 1
# Toggle to make the other player the current player
p = 1 - p # Simple trick to toggle between p = 0 and p = 1
else:
# If we're here then it means the cell was not empty
print "ILLEGAL MOVE PLAYER ", self.players[p].name, " TRY AGAIN "
# We check to see if anybody won
winningMove = self.checkWin()
# We are outside the main game loop here. So we print the final board out for the user to see.
self.printBoard()
# We check to see how we exited the main game loop (either winningMove contains the winning combination
# or we reached the maximum number of moves). So this if statement checks to see if winningMove is not None
# like we initialized it before the loop.
if (winningMove != None):
# We find out the piece (0 or 1) that won from the first cell of the winningMove array
winner = self.board[winningMove[0]]
# Tell them they won
print "PLAYER ", self.players[winner].name, " HAS WON THIS GAME USING POSITIONS ", winningMove
# Tell the TTTPlayer/TTTPlayerAI to draw their winning line (using the arms)
self.players[winner].placeWinningLine(winningMove)
else:
# If we're here then we must have exited the loop because we reached the limit of moves
print "NOBODY WON !"
| [
"charles.sam.daniel@gmail.com"
] | charles.sam.daniel@gmail.com |
f31e15dee4055a30fb9f5aa5ef69fe6ab9a62139 | 734719be1ef3ceb2de58c56949969a76170f65a6 | /binary_tree_max_path_sum.py | 6a260837d8e220f765e80290b5fc20c073aced69 | [] | no_license | raunaqjain/leetcode_solutions | 26e174eb98700951624ca83ab7661dbc35a23729 | a373085712dba7971ff90bc982f3a36dffd14b5a | refs/heads/master | 2023-01-09T02:42:28.871140 | 2020-11-01T01:43:20 | 2020-11-01T01:43:20 | 309,002,412 | 0 | 0 | null | 2020-11-01T01:43:21 | 2020-11-01T01:25:54 | Python | UTF-8 | Python | false | false | 437 | py | class Solution:
def helper(self, root):
if not root:
return 0
left = max(0, self.helper(root.left))
right = max(0, self.helper(root.right))
self.ans = max(self.ans, left + right + root.val)
return max(left, right) + root.val
    def maxPathSum(self, root: TreeNode) -> int:
        # LeetCode 124: maximum sum over any node-to-node path in the tree.
        self.ans = float('-inf')  # best path seen so far; updated in helper
        self.helper(root)
        return self.ans
| [
"jraunaq18@gmail.com"
] | jraunaq18@gmail.com |
38cde75832792f190c10e244e0589cd487f12a03 | 27341bdbb9bc564905f8bbc01604eef9cefe6ca4 | /venv/lib/python3.9/site-packages/sqlalchemy_jsonfield/__init__.py | cbc8ad531207ccddd2148b41318e6364443c230f | [] | no_license | derek-johns/nba-batch-pipeline | fb10ae171e21537d7d83a8ac89c3c2c8b7072f21 | 65898c80c1eea448c75ba07b553b49c7a93837b2 | refs/heads/main | 2023-02-18T16:52:52.192766 | 2021-01-09T20:34:28 | 2021-01-09T20:34:28 | 328,239,908 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # Copyright 2016 Alexey Stepanov aka penguinolog
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implement JSONField for SQLAlchemy."""
# Local Implementation
from .jsonfield import JSONField
from .jsonfield import mutable_json_field
try:
    # Local Implementation
    # _version.py is presumably generated at build time; it is absent in a
    # raw source checkout, hence the ImportError guard.
    from ._version import version as __version__
except ImportError:
    pass
# Public API of the package.
__all__ = ("JSONField", "mutable_json_field")
__author__ = "Alexey Stepanov <penguinolog@gmail.com>"
__author_email__ = "penguinolog@gmail.com"
__url__ = "https://github.com/penguinolog/sqlalchemy_jsonfield"
__description__ = "SQLALchemy JSONField implementation for storing dicts at SQL"
__license__ = "Apache License, Version 2.0"
"d.johnson13879@gmail.com"
] | d.johnson13879@gmail.com |
9361bb61cd85e87954f06cd6e55599d0840b2082 | efd9c0d47e94dbac8e6e700f45c2f7747ded094b | /Email_Template/Email/migrations/0003_auto_20181023_1457.py | 2f8bca92437323d6af215b43d5f72c3c90b616eb | [] | no_license | SoumyaPuj/EmployeeDatabaseRequirement | dd1b2f21c13f47835e7390c3f831f5d96ef611b6 | d9b3fe22e2327af67aaf6e8d47e50dea30aa01c5 | refs/heads/master | 2020-04-04T19:04:52.513386 | 2018-11-05T11:32:13 | 2018-11-05T11:32:13 | 156,191,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # Generated by Django 2.1.2 on 2018-10-23 09:27
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Email_Design to Email_Information.
    dependencies = [
        ('Email', '0002_auto_20181023_1456'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Email_Design',
            new_name='Email_Information',
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
cccbb148040f217b8a624f39a07f85f4fb552de4 | 433ada0b349e8a68dd85a5af047b90d23aee44c9 | /include/ClientCaches.py | f4a6eb45028de815aa1b2763dfac4061d03724d5 | [
"WTFPL"
] | permissive | 3wayHimself/hydrus | 7ddfe3507ad2b3e9dc4ab69cb9c6e25efc06c5aa | 804ffe8cecfe01bdb9518070d31dbf826b72e8ef | refs/heads/master | 2020-03-23T04:37:53.849078 | 2018-07-11T20:23:51 | 2018-07-11T20:23:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106,375 | py | import ClientDefaults
import ClientDownloading
import ClientParsing
import ClientPaths
import ClientRendering
import ClientSearch
import ClientServices
import ClientThreading
import HydrusConstants as HC
import HydrusExceptions
import HydrusFileHandling
import HydrusPaths
import HydrusSerialisable
import HydrusSessions
import HydrusThreading
import itertools
import json
import os
import random
import requests
import threading
import time
import urllib
import wx
import HydrusData
import ClientData
import ClientConstants as CC
import HydrusGlobals as HG
import collections
import HydrusTags
import traceback
# important thing here, and reason why it is recursive, is because we want to preserve the parent-grandparent interleaving
def BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents ):
    """Expand each service's child->parents map so every child also lists its
    ancestors, preserving parent-grandparent interleaving in the lists."""

    # Recursion is deliberate: visiting each parent's own parents immediately
    # after appending it keeps the interleaved ordering.
    def _add_ancestors(simple_map, children_to_parents, child, parents):
        for parent in parents:
            if parent not in children_to_parents[child]:
                children_to_parents[child].append(parent)
            if parent in simple_map:
                _add_ancestors(simple_map, children_to_parents, child, simple_map[parent])

    result = collections.defaultdict(HydrusData.default_dict_list)

    for (service_key, simple_map) in service_keys_to_simple_children_to_parents.items():
        children_to_parents = result[service_key]
        for (child, parents) in simple_map.items():
            _add_ancestors(simple_map, children_to_parents, child, parents)

    return result
def BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat ):
    """Convert per-service (child, parent) pair collections into
    child -> set(parents) maps."""
    result = collections.defaultdict(HydrusData.default_dict_set)
    for (service_key, pairs) in service_keys_to_pairs_flat.items():
        result[service_key] = BuildSimpleChildrenToParents(pairs)
    return result
def BuildSimpleChildrenToParents( pairs ):
    """Build a child -> set(parents) map from (child, parent) pairs,
    rejecting self-pairs and pairs that would introduce a loop."""
    simple_children_to_parents = HydrusData.default_dict_set()
    for (child, parent) in pairs:
        if child == parent:
            continue
        # Reject any relationship that would make the parent graph cyclic.
        if LoopInSimpleChildrenToParents(simple_children_to_parents, child, parent):
            continue
        simple_children_to_parents[child].add(parent)
    return simple_children_to_parents
def CollapseTagSiblingPairs( groups_of_pairs ):
    """Collapse sibling chains into direct bad->best mappings.

    Groups arrive in descending order of precedence, so earlier (e.g. local)
    relationships override later (remote) ones.

    A pair is rejected when it would create a loop (a->b, b->c, c->a) or when
    its 'bad' side already has a relationship (a->b then a->c).
    """
    valid_chains = {}

    for pairs in groups_of_pairs:
        for (bad, good) in sorted(pairs):
            if bad == good:
                continue  # a->a is a self-loop
            if bad in valid_chains:
                continue  # this bad tag is already mapped
            # Walk the chain from 'good'; if we ever arrive back at 'bad',
            # accepting this pair would close a loop.
            cursor = good
            while cursor in valid_chains:
                cursor = valid_chains[cursor]
                if cursor == bad:
                    break
            if cursor != bad:
                valid_chains[bad] = good

    # Collapse the chains, turning a->b, b->c ... e->f into
    # a->f, b->f ... e->f.
    siblings = {}

    for (bad, good) in valid_chains.items():
        if good in siblings:
            # The endpoint for this chain was already computed.
            best = siblings[good]
        else:
            # Follow the chain to its endpoint.
            cursor = good
            while cursor in valid_chains:
                cursor = valid_chains[cursor]
            best = cursor
        siblings[bad] = best

    return siblings
def LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
    """Return True if making 'parent' a parent of 'child' would close a loop.

    Breadth-first walk upwards from 'parent': a loop exists iff 'child' is
    reachable among its ancestors.
    """
    frontier = { parent }
    while True:
        expandable = frontier.intersection(simple_children_to_parents.keys())
        if len(expandable) == 0:
            return False
        next_frontier = set()
        for node in expandable:
            next_frontier.update(simple_children_to_parents[node])
        frontier = next_frontier
        if child in frontier:
            return True
class ClientFilesManager( object ):
    def __init__( self, controller ):
        # Manages the client's sharded on-disk store of files and thumbnails.
        self._controller = controller
        self._lock = threading.Lock()
        # Maps a shard prefix ('f'/'t'/'r' plus two hex chars, e.g. 'f0a')
        # to the base directory holding that shard.
        self._prefixes_to_locations = {}
        self._bad_error_occured = False
        self._missing_locations = set()
        # _Reinit presumably populates the prefix map; it is defined
        # elsewhere in this class.
        self._Reinit()
def _GenerateExpectedFilePath( self, hash, mime ):
hash_encoded = hash.encode( 'hex' )
prefix = 'f' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded + HC.mime_ext_lookup[ mime ] )
return path
def _GenerateExpectedFullSizeThumbnailPath( self, hash ):
hash_encoded = hash.encode( 'hex' )
prefix = 't' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail'
return path
def _GenerateExpectedResizedThumbnailPath( self, hash ):
hash_encoded = hash.encode( 'hex' )
prefix = 'r' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail.resized'
return path
def _GenerateFullSizeThumbnail( self, hash, mime = None ):
if mime is None:
try:
file_path = self._LookForFilePath( hash )
except HydrusExceptions.FileMissingException:
raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It could not be regenerated because the original file was also missing. This event could indicate hard drive corruption or an unplugged external drive. Please check everything is ok.' )
mime = HydrusFileHandling.GetMime( file_path )
else:
file_path = self._GenerateExpectedFilePath( hash, mime )
try:
percentage_in = self._controller.new_options.GetInteger( 'video_thumbnail_percentage_in' )
thumbnail = HydrusFileHandling.GenerateThumbnail( file_path, mime, percentage_in = percentage_in )
except Exception as e:
raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It could not be regenerated from the original file for the above reason. This event could indicate hard drive corruption. Please check everything is ok.' )
full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
try:
HydrusPaths.MakeFileWritable( full_size_path )
with open( full_size_path, 'wb' ) as f:
f.write( thumbnail )
except Exception as e:
raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It was regenerated from the original file, but hydrus could not write it to the location ' + full_size_path + ' for the above reason. This event could indicate hard drive corruption, and it also suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
def _GenerateResizedThumbnail( self, hash, mime ):
full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
thumbnail_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
if mime in ( HC.IMAGE_GIF, HC.IMAGE_PNG ):
fullsize_thumbnail_mime = HC.IMAGE_PNG
else:
fullsize_thumbnail_mime = HC.IMAGE_JPEG
try:
thumbnail_resized = HydrusFileHandling.GenerateThumbnailFromStaticImage( full_size_path, thumbnail_dimensions, fullsize_thumbnail_mime )
except:
try:
ClientPaths.DeletePath( full_size_path, always_delete_fully = True )
except:
raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was found, but it would not render. An attempt to delete it was made, but that failed as well. This event could indicate hard drive corruption, and it also suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
self._GenerateFullSizeThumbnail( hash, mime )
thumbnail_resized = HydrusFileHandling.GenerateThumbnailFromStaticImage( full_size_path, thumbnail_dimensions, fullsize_thumbnail_mime )
resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
try:
HydrusPaths.MakeFileWritable( resized_path )
with open( resized_path, 'wb' ) as f:
f.write( thumbnail_resized )
except Exception as e:
HydrusData.ShowException( e )
raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was found, but the resized version would not save to disk. This event suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
def _GetRecoverTuple( self ):
all_locations = { location for location in self._prefixes_to_locations.values() }
all_prefixes = self._prefixes_to_locations.keys()
for possible_location in all_locations:
for prefix in all_prefixes:
correct_location = self._prefixes_to_locations[ prefix ]
if possible_location != correct_location and os.path.exists( os.path.join( possible_location, prefix ) ):
recoverable_location = possible_location
return ( prefix, recoverable_location, correct_location )
return None
def _GetRebalanceTuple( self ):
( locations_to_ideal_weights, resized_thumbnail_override, full_size_thumbnail_override ) = self._controller.new_options.GetClientFilesLocationsToIdealWeights()
total_weight = sum( locations_to_ideal_weights.values() )
ideal_locations_to_normalised_weights = { location : weight / total_weight for ( location, weight ) in locations_to_ideal_weights.items() }
current_locations_to_normalised_weights = collections.defaultdict( lambda: 0 )
file_prefixes = [ prefix for prefix in self._prefixes_to_locations if prefix.startswith( 'f' ) ]
for file_prefix in file_prefixes:
location = self._prefixes_to_locations[ file_prefix ]
current_locations_to_normalised_weights[ location ] += 1.0 / 256
for location in current_locations_to_normalised_weights.keys():
if location not in ideal_locations_to_normalised_weights:
ideal_locations_to_normalised_weights[ location ] = 0.0
#
overweight_locations = []
underweight_locations = []
for ( location, ideal_weight ) in ideal_locations_to_normalised_weights.items():
if location in current_locations_to_normalised_weights:
current_weight = current_locations_to_normalised_weights[ location ]
if current_weight < ideal_weight:
underweight_locations.append( location )
elif current_weight >= ideal_weight + 1.0 / 256:
overweight_locations.append( location )
else:
underweight_locations.append( location )
#
if len( underweight_locations ) > 0 and len( overweight_locations ) > 0:
overweight_location = overweight_locations.pop( 0 )
underweight_location = underweight_locations.pop( 0 )
random.shuffle( file_prefixes )
for file_prefix in file_prefixes:
location = self._prefixes_to_locations[ file_prefix ]
if location == overweight_location:
return ( file_prefix, overweight_location, underweight_location )
else:
if full_size_thumbnail_override is None:
for hex_prefix in HydrusData.IterateHexPrefixes():
full_size_prefix = 't' + hex_prefix
file_prefix = 'f' + hex_prefix
full_size_location = self._prefixes_to_locations[ full_size_prefix ]
file_location = self._prefixes_to_locations[ file_prefix ]
if full_size_location != file_location:
return ( full_size_prefix, full_size_location, file_location )
else:
for hex_prefix in HydrusData.IterateHexPrefixes():
full_size_prefix = 't' + hex_prefix
full_size_location = self._prefixes_to_locations[ full_size_prefix ]
if full_size_location != full_size_thumbnail_override:
return ( full_size_prefix, full_size_location, full_size_thumbnail_override )
if resized_thumbnail_override is None:
for hex_prefix in HydrusData.IterateHexPrefixes():
resized_prefix = 'r' + hex_prefix
file_prefix = 'f' + hex_prefix
resized_location = self._prefixes_to_locations[ resized_prefix ]
file_location = self._prefixes_to_locations[ file_prefix ]
if resized_location != file_location:
return ( resized_prefix, resized_location, file_location )
else:
for hex_prefix in HydrusData.IterateHexPrefixes():
resized_prefix = 'r' + hex_prefix
resized_location = self._prefixes_to_locations[ resized_prefix ]
if resized_location != resized_thumbnail_override:
return ( resized_prefix, resized_location, resized_thumbnail_override )
return None
def _IterateAllFilePaths( self ):
for ( prefix, location ) in self._prefixes_to_locations.items():
if prefix.startswith( 'f' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _IterateAllThumbnailPaths( self ):
for ( prefix, location ) in self._prefixes_to_locations.items():
if prefix.startswith( 't' ) or prefix.startswith( 'r' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _LookForFilePath( self, hash ):
for potential_mime in HC.ALLOWED_MIMES:
potential_path = self._GenerateExpectedFilePath( hash, potential_mime )
if os.path.exists( potential_path ):
return potential_path
raise HydrusExceptions.FileMissingException( 'File for ' + hash.encode( 'hex' ) + ' not found!' )
def _Reinit( self ):
self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
if HG.client_controller.IsFirstStart():
try:
for ( prefix, location ) in self._prefixes_to_locations.items():
HydrusPaths.MakeSureDirectoryExists( location )
subdir = os.path.join( location, prefix )
HydrusPaths.MakeSureDirectoryExists( subdir )
except:
text = 'Attempting to create the database\'s client_files folder structure failed!'
wx.MessageBox( text )
raise
else:
self._missing_locations = set()
for ( prefix, location ) in self._prefixes_to_locations.items():
if os.path.exists( location ):
subdir = os.path.join( location, prefix )
if not os.path.exists( subdir ):
self._missing_locations.add( ( location, prefix ) )
else:
self._missing_locations.add( ( location, prefix ) )
if len( self._missing_locations ) > 0:
self._bad_error_occured = True
#
missing_dict = HydrusData.BuildKeyToListDict( self._missing_locations )
missing_locations = list( missing_dict.keys() )
missing_locations.sort()
missing_string = ''
for l in missing_locations:
missing_prefixes = list( missing_dict[ l ] )
missing_prefixes.sort()
missing_prefixes_string = ' ' + os.linesep.join( ( ', '.join( block ) for block in HydrusData.SplitListIntoChunks( missing_prefixes, 32 ) ) )
missing_string += os.linesep
missing_string += l
missing_string += os.linesep
missing_string += missing_prefixes_string
#
if len( self._missing_locations ) > 4:
text = 'When initialising the client files manager, some file locations did not exist! They have all been written to the log!'
text += os.linesep * 2
text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
wx.MessageBox( text )
HydrusData.DebugPrint( text )
HydrusData.DebugPrint( 'Missing locations follow:' )
HydrusData.DebugPrint( missing_string )
else:
text = 'When initialising the client files manager, these file locations did not exist:'
text += os.linesep * 2
text += missing_string
text += os.linesep * 2
text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
wx.MessageBox( text )
HydrusData.DebugPrint( text )
def GetMissing( self ):
return self._missing_locations
def LocklessAddFileFromString( self, hash, mime, data ):
dest_path = self._GenerateExpectedFilePath( hash, mime )
HydrusPaths.MakeFileWritable( dest_path )
with open( dest_path, 'wb' ) as f:
f.write( data )
def LocklessAddFile( self, hash, mime, source_path ):
dest_path = self._GenerateExpectedFilePath( hash, mime )
if not os.path.exists( dest_path ):
successful = HydrusPaths.MirrorFile( source_path, dest_path )
if not successful:
raise Exception( 'There was a problem copying the file from ' + source_path + ' to ' + dest_path + '!' )
def AddFullSizeThumbnail( self, hash, thumbnail ):
with self._lock:
self.LocklessAddFullSizeThumbnail( hash, thumbnail )
def LocklessAddFullSizeThumbnail( self, hash, thumbnail ):
path = self._GenerateExpectedFullSizeThumbnailPath( hash )
HydrusPaths.MakeFileWritable( path )
with open( path, 'wb' ) as f:
f.write( thumbnail )
resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
if os.path.exists( resized_path ):
ClientPaths.DeletePath( resized_path, always_delete_fully = True )
self._controller.pub( 'clear_thumbnails', { hash } )
self._controller.pub( 'new_thumbnails', { hash } )
def CheckFileIntegrity( self, *args, **kwargs ):
with self._lock:
self._controller.WriteSynchronous( 'file_integrity', *args, **kwargs )
def ClearOrphans( self, move_location = None ):
with self._lock:
job_key = ClientThreading.JobKey( cancellable = True )
job_key.SetVariable( 'popup_title', 'clearing orphans' )
job_key.SetVariable( 'popup_text_1', 'preparing' )
self._controller.pub( 'message', job_key )
orphan_paths = []
orphan_thumbnails = []
for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
if i % 100 == 0:
status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' files, found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans'
job_key.SetVariable( 'popup_text_1', status )
try:
is_an_orphan = False
( directory, filename ) = os.path.split( path )
should_be_a_hex_hash = filename[:64]
hash = should_be_a_hex_hash.decode( 'hex' )
is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'file', hash )
except:
is_an_orphan = True
if is_an_orphan:
if move_location is not None:
( source_dir, filename ) = os.path.split( path )
dest = os.path.join( move_location, filename )
dest = HydrusPaths.AppendPathUntilNoConflicts( dest )
HydrusData.Print( 'Moving the orphan ' + path + ' to ' + dest )
HydrusPaths.MergeFile( path, dest )
orphan_paths.append( path )
time.sleep( 2 )
for ( i, path ) in enumerate( self._IterateAllThumbnailPaths() ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
if i % 100 == 0:
status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' thumbnails, found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphans'
job_key.SetVariable( 'popup_text_1', status )
try:
is_an_orphan = False
( directory, filename ) = os.path.split( path )
should_be_a_hex_hash = filename[:64]
hash = should_be_a_hex_hash.decode( 'hex' )
is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'thumbnail', hash )
except:
is_an_orphan = True
if is_an_orphan:
orphan_thumbnails.append( path )
time.sleep( 2 )
if move_location is None and len( orphan_paths ) > 0:
status = 'found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans, now deleting'
job_key.SetVariable( 'popup_text_1', status )
time.sleep( 5 )
for path in orphan_paths:
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
HydrusData.Print( 'Deleting the orphan ' + path )
status = 'deleting orphan files: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_paths ) )
job_key.SetVariable( 'popup_text_1', status )
ClientPaths.DeletePath( path )
if len( orphan_thumbnails ) > 0:
status = 'found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails, now deleting'
job_key.SetVariable( 'popup_text_1', status )
time.sleep( 5 )
for ( i, path ) in enumerate( orphan_thumbnails ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
status = 'deleting orphan thumbnails: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_thumbnails ) )
job_key.SetVariable( 'popup_text_1', status )
HydrusData.Print( 'Deleting the orphan ' + path )
ClientPaths.DeletePath( path, always_delete_fully = True )
if len( orphan_paths ) == 0 and len( orphan_thumbnails ) == 0:
final_text = 'no orphans found!'
else:
final_text = HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphan files and ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails cleared!'
job_key.SetVariable( 'popup_text_1', final_text )
HydrusData.Print( job_key.ToString() )
job_key.Finish()
def DelayedDeleteFiles( self, hashes, time_to_delete ):
while not HydrusData.TimeHasPassed( time_to_delete ):
time.sleep( 0.5 )
big_pauser = HydrusData.BigJobPauser( period = 1 )
with self._lock:
for hash in hashes:
try:
path = self._LookForFilePath( hash )
except HydrusExceptions.FileMissingException:
continue
ClientPaths.DeletePath( path )
big_pauser.Pause()
def DelayedDeleteThumbnails( self, hashes, time_to_delete ):
while not HydrusData.TimeHasPassed( time_to_delete ):
time.sleep( 0.5 )
with self._lock:
big_pauser = HydrusData.BigJobPauser( period = 1 )
for hash in hashes:
path = self._GenerateExpectedFullSizeThumbnailPath( hash )
resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
ClientPaths.DeletePath( path, always_delete_fully = True )
ClientPaths.DeletePath( resized_path, always_delete_fully = True )
big_pauser.Pause()
def GetFilePath( self, hash, mime = None ):
with self._lock:
return self.LocklessGetFilePath( hash, mime )
def ImportFile( self, file_import_job ):
( pre_import_status, hash, note ) = file_import_job.GenerateHashAndStatus()
if file_import_job.IsNewToDB():
file_import_job.GenerateInfo()
file_import_job.CheckIsGoodToImport()
( temp_path, thumbnail ) = file_import_job.GetTempPathAndThumbnail()
mime = file_import_job.GetMime()
with self._lock:
self.LocklessAddFile( hash, mime, temp_path )
if thumbnail is not None:
self.LocklessAddFullSizeThumbnail( hash, thumbnail )
( import_status, note ) = self._controller.WriteSynchronous( 'import_file', file_import_job )
else:
import_status = pre_import_status
file_import_job.PubsubContentUpdates()
return ( import_status, hash, note )
def LocklessGetFilePath( self, hash, mime = None ):
if mime is None:
path = self._LookForFilePath( hash )
else:
path = self._GenerateExpectedFilePath( hash, mime )
if not os.path.exists( path ):
raise HydrusExceptions.FileMissingException( 'No file found at path + ' + path + '!' )
return path
def GetFullSizeThumbnailPath( self, hash, mime = None ):
with self._lock:
path = self._GenerateExpectedFullSizeThumbnailPath( hash )
if not os.path.exists( path ):
self._GenerateFullSizeThumbnail( hash, mime )
if not self._bad_error_occured:
self._bad_error_occured = True
HydrusData.ShowText( 'A thumbnail for a file, ' + hash.encode( 'hex' ) + ', was missing. It has been regenerated from the original file, but this event could indicate hard drive corruption. Please check everything is ok. This error may be occuring for many files, but this message will only display once per boot. If you are recovering from a fractured database, you may wish to run \'database->regenerate->all thumbnails\'.' )
return path
def GetResizedThumbnailPath( self, hash, mime ):
with self._lock:
path = self._GenerateExpectedResizedThumbnailPath( hash )
if not os.path.exists( path ):
self._GenerateResizedThumbnail( hash, mime )
return path
def LocklessHasFullSizeThumbnail( self, hash ):
path = self._GenerateExpectedFullSizeThumbnailPath( hash )
return os.path.exists( path )
def Rebalance( self, job_key ):
try:
if self._bad_error_occured:
wx.MessageBox( 'A serious file error has previously occured during this session, so further file moving will not be reattempted. Please restart the client before trying again.' )
return
with self._lock:
rebalance_tuple = self._GetRebalanceTuple()
while rebalance_tuple is not None:
if job_key.IsCancelled():
break
( prefix, overweight_location, underweight_location ) = rebalance_tuple
text = 'Moving \'' + prefix + '\' from ' + overweight_location + ' to ' + underweight_location
HydrusData.Print( text )
job_key.SetVariable( 'popup_text_1', text )
# these two lines can cause a deadlock because the db sometimes calls stuff in here.
self._controller.Write( 'relocate_client_files', prefix, overweight_location, underweight_location )
self._Reinit()
rebalance_tuple = self._GetRebalanceTuple()
recover_tuple = self._GetRecoverTuple()
while recover_tuple is not None:
if job_key.IsCancelled():
break
( prefix, recoverable_location, correct_location ) = recover_tuple
text = 'Recovering \'' + prefix + '\' from ' + recoverable_location + ' to ' + correct_location
HydrusData.Print( text )
job_key.SetVariable( 'popup_text_1', text )
recoverable_path = os.path.join( recoverable_location, prefix )
correct_path = os.path.join( correct_location, prefix )
HydrusPaths.MergeTree( recoverable_path, correct_path )
recover_tuple = self._GetRecoverTuple()
finally:
job_key.SetVariable( 'popup_text_1', 'done!' )
job_key.Finish()
job_key.Delete()
def RebalanceWorkToDo( self ):
with self._lock:
return self._GetRebalanceTuple() is not None
def RegenerateResizedThumbnail( self, hash, mime ):
with self._lock:
self.LocklessRegenerateResizedThumbnail( hash, mime )
def LocklessRegenerateResizedThumbnail( self, hash, mime ):
self._GenerateResizedThumbnail( hash, mime )
def RegenerateThumbnails( self, only_do_missing = False ):
with self._lock:
job_key = ClientThreading.JobKey( cancellable = True )
job_key.SetVariable( 'popup_title', 'regenerating thumbnails' )
job_key.SetVariable( 'popup_text_1', 'creating directories' )
self._controller.pub( 'modal_message', job_key )
num_broken = 0
for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
try:
while job_key.IsPaused() or job_key.IsCancelled():
time.sleep( 0.1 )
if job_key.IsCancelled():
job_key.SetVariable( 'popup_text_1', 'cancelled' )
HydrusData.Print( job_key.ToString() )
return
job_key.SetVariable( 'popup_text_1', HydrusData.ToHumanInt( i ) + ' done' )
( base, filename ) = os.path.split( path )
if '.' in filename:
( hash_encoded, ext ) = filename.split( '.', 1 )
else:
continue # it is an update file, so let's save us some ffmpeg lag and logspam
hash = hash_encoded.decode( 'hex' )
full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
if only_do_missing and os.path.exists( full_size_path ):
continue
mime = HydrusFileHandling.GetMime( path )
if mime in HC.MIMES_WITH_THUMBNAILS:
self._GenerateFullSizeThumbnail( hash, mime )
thumbnail_resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
if os.path.exists( thumbnail_resized_path ):
ClientPaths.DeletePath( thumbnail_resized_path, always_delete_fully = True )
except:
HydrusData.Print( path )
HydrusData.Print( traceback.format_exc() )
num_broken += 1
if num_broken > 0:
job_key.SetVariable( 'popup_text_1', 'done! ' + HydrusData.ToHumanInt( num_broken ) + ' files caused errors, which have been written to the log.' )
else:
job_key.SetVariable( 'popup_text_1', 'done!' )
HydrusData.Print( job_key.ToString() )
job_key.Finish()
class DataCache( object ):
    """A size- and timeout-limited LRU cache.
    
    Cached objects must expose GetEstimatedMemoryFootprint(); the cache evicts
    least-recently-used entries when the total footprint exceeds cache_size,
    and MaintainCache() (pubsubbed on the memory maintenance pulse) drops
    anything untouched for longer than the timeout.
    """
    
    def __init__( self, controller, cache_size, timeout = 1200 ):
        self._controller = controller
        self._cache_size = cache_size
        self._timeout = timeout
        self._keys_to_data = {}
        # key -> last access timestamp, kept in least-recently-used-first order
        self._keys_fifo = collections.OrderedDict()
        self._total_estimated_memory_footprint = 0
        self._lock = threading.Lock()
        self._controller.sub( self, 'MaintainCache', 'memory_maintenance_pulse' )
    
    def _Delete( self, key ):
        # fix: removed the unused 'deletee_data' local the original fetched here
        # NOTE(review): this does not remove key from _keys_fifo; stale fifo
        # entries are tolerated via the early return here -- confirm intended
        if key not in self._keys_to_data:
            return
        del self._keys_to_data[ key ]
        self._RecalcMemoryUsage()
    
    def _DeleteItem( self ):
        # evict the least recently used entry
        ( deletee_key, last_access_time ) = self._keys_fifo.popitem( last = False )
        self._Delete( deletee_key )
    
    def _RecalcMemoryUsage( self ):
        self._total_estimated_memory_footprint = sum( ( data.GetEstimatedMemoryFootprint() for data in self._keys_to_data.values() ) )
    
    def _TouchKey( self, key ):
        # have to delete first, rather than overwriting, so the ordereddict updates its internal order
        if key in self._keys_fifo:
            del self._keys_fifo[ key ]
        self._keys_fifo[ key ] = HydrusData.GetNow()
    
    def Clear( self ):
        with self._lock:
            self._keys_to_data = {}
            self._keys_fifo = collections.OrderedDict()
            self._total_estimated_memory_footprint = 0
    
    def AddData( self, key, data ):
        with self._lock:
            if key not in self._keys_to_data:
                # make room before adding the new entry
                while self._total_estimated_memory_footprint > self._cache_size:
                    self._DeleteItem()
                self._keys_to_data[ key ] = data
                self._TouchKey( key )
                self._RecalcMemoryUsage()
    
    def DeleteData( self, key ):
        with self._lock:
            self._Delete( key )
    
    def GetData( self, key ):
        with self._lock:
            if key not in self._keys_to_data:
                raise Exception( 'Cache error! Looking for ' + HydrusData.ToUnicode( key ) + ', but it was missing.' )
            self._TouchKey( key )
            return self._keys_to_data[ key ]
    
    def GetIfHasData( self, key ):
        with self._lock:
            if key in self._keys_to_data:
                self._TouchKey( key )
                return self._keys_to_data[ key ]
            else:
                return None
    
    def HasData( self, key ):
        with self._lock:
            return key in self._keys_to_data
    
    def MaintainCache( self ):
        # drop expired entries, oldest first, stopping at the first fresh one
        with self._lock:
            while True:
                if len( self._keys_fifo ) == 0:
                    break
                else:
                    ( key, last_access_time ) = next( self._keys_fifo.iteritems() )
                    if HydrusData.TimeHasPassed( last_access_time + self._timeout ):
                        self._DeleteItem()
                    else:
                        break
class LocalBooruCache( object ):
    """Caches share info for the local booru service, populated lazily from the
    db and refreshed when the booru or its shares change."""
    
    def __init__( self, controller ):
        self._controller = controller
        self._lock = threading.Lock()
        self._RefreshShares()
        self._controller.sub( self, 'RefreshShares', 'refresh_local_booru_shares' )
        self._controller.sub( self, 'RefreshShares', 'restart_booru' )
    
    def _CheckDataUsage( self ):
        # raises if the booru service is over its bandwidth allowance
        if not self._local_booru_service.BandwidthOK():
            raise HydrusExceptions.ForbiddenException( 'This booru has used all its monthly data. Please try again next month.' )
    
    def _CheckFileAuthorised( self, share_key, hash ):
        self._CheckShareAuthorised( share_key )
        info = self._GetInfo( share_key )
        if hash not in info[ 'hashes_set' ]:
            raise HydrusExceptions.NotFoundException( 'That file was not found in that share.' )
    
    def _CheckShareAuthorised( self, share_key ):
        self._CheckDataUsage()
        info = self._GetInfo( share_key )
        timeout = info[ 'timeout' ]
        if timeout is not None and HydrusData.TimeHasPassed( timeout ):
            raise HydrusExceptions.ForbiddenException( 'This share has expired.' )
    
    def _GetInfo( self, share_key ):
        # fix: catch only the missing-key case -- the original bare except could
        # also mask unrelated errors as 'not found'
        try:
            info = self._keys_to_infos[ share_key ]
        except KeyError:
            raise HydrusExceptions.NotFoundException( 'Did not find that share on this booru.' )
        if info is None:
            # first request for this share -- lazily populate from the db
            info = self._controller.Read( 'local_booru_share', share_key )
            hashes = info[ 'hashes' ]
            info[ 'hashes_set' ] = set( hashes )
            media_results = self._controller.Read( 'media_results', hashes )
            info[ 'media_results' ] = media_results
            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
            info[ 'hashes_to_media_results' ] = hashes_to_media_results
            self._keys_to_infos[ share_key ] = info
        return info
    
    def _RefreshShares( self ):
        self._local_booru_service = self._controller.services_manager.GetService( CC.LOCAL_BOORU_SERVICE_KEY )
        # every current share key maps to None until its info is lazily loaded
        self._keys_to_infos = {}
        share_keys = self._controller.Read( 'local_booru_share_keys' )
        for share_key in share_keys:
            self._keys_to_infos[ share_key ] = None
    
    def CheckShareAuthorised( self, share_key ):
        with self._lock:
            self._CheckShareAuthorised( share_key )
    
    def CheckFileAuthorised( self, share_key, hash ):
        with self._lock:
            self._CheckFileAuthorised( share_key, hash )
    
    def GetGalleryInfo( self, share_key ):
        # returns ( name, text, timeout, media_results ) for the share's gallery page
        with self._lock:
            self._CheckShareAuthorised( share_key )
            info = self._GetInfo( share_key )
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_results = info[ 'media_results' ]
            return ( name, text, timeout, media_results )
    
    def GetMediaResult( self, share_key, hash ):
        with self._lock:
            info = self._GetInfo( share_key )
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            return media_result
    
    def GetPageInfo( self, share_key, hash ):
        # returns ( name, text, timeout, media_result ) for a single file page
        with self._lock:
            self._CheckFileAuthorised( share_key, hash )
            info = self._GetInfo( share_key )
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            return ( name, text, timeout, media_result )
    
    def RefreshShares( self ):
        with self._lock:
            self._RefreshShares()
class MenuEventIdToActionCache( object ):
    """Two-way map between wx menu event ids and ( command, data ) actions.
    
    Temporary ids are recycled through a free pool: a temporary id is valid
    for one GetAction lookup, after which all temporary mappings are cleared
    and their ids returned to the pool.
    """
    
    def __init__( self ):
        self._ids_to_actions = {}
        self._actions_to_ids = {}
        self._temporary_ids = set()
        self._free_temporary_ids = set()
    
    def _ClearTemporaries( self ):
        # forget the mappings for every temporary id currently in use
        in_use_ids = self._temporary_ids.difference( self._free_temporary_ids )
        for used_id in in_use_ids:
            used_action = self._ids_to_actions.pop( used_id )
            del self._actions_to_ids[ used_action ]
        # every temporary id is now available again
        self._free_temporary_ids = set( self._temporary_ids )
    
    def _GetNewId( self, temporary ):
        if not temporary:
            return wx.NewId()
        # recycle a free temporary id, minting a new one if the pool is empty
        if len( self._free_temporary_ids ) == 0:
            fresh_id = wx.NewId()
            self._temporary_ids.add( fresh_id )
            self._free_temporary_ids.add( fresh_id )
        return self._free_temporary_ids.pop()
    
    def GetAction( self, event_id ):
        # None if this id is unknown (e.g. a temporary id already cleared)
        action = self._ids_to_actions.get( event_id, None )
        # a temporary id is good for a single lookup
        if event_id in self._temporary_ids:
            self._ClearTemporaries()
        return action
    
    def GetId( self, command, data = None, temporary = False ):
        action = ( command, data )
        if action in self._actions_to_ids:
            return self._actions_to_ids[ action ]
        new_event_id = self._GetNewId( temporary )
        self._ids_to_actions[ new_event_id ] = action
        self._actions_to_ids[ action ] = new_event_id
        return new_event_id
    
    def GetPermanentId( self, command, data = None ):
        return self.GetId( command, data, False )
    
    def GetTemporaryId( self, command, data = None ):
        # dataless actions are shared so often they are worth keeping permanently
        temporary = data is not None
        return self.GetId( command, data, temporary )
# module-level singleton instance of the id/action cache above
MENU_EVENT_ID_TO_ACTION_CACHE = MenuEventIdToActionCache()
class ParsingCache( object ):
    """Short-lived caches of parsed HTML soups and JSON objects, so repeated
    parses of the same text within a few seconds are free."""
    
    def __init__( self ):
        self._html_to_soups = {}
        self._json_to_jsons = {}
        self._lock = threading.Lock()
    
    def _CleanCache( self ):
        # drop anything not looked at within the last ten seconds
        for cache in ( self._html_to_soups, self._json_to_jsons ):
            expired_keys = { key for ( key, ( last_accessed, parsed_object ) ) in cache.items() if HydrusData.TimeHasPassed( last_accessed + 10 ) }
            for expired_key in expired_keys:
                del cache[ expired_key ]
    
    def _GetFromCache( self, cache, key, parse ):
        # shared fetch-or-parse logic for both caches; refreshes the access time
        # and triggers a clean when the cache grows past ten entries
        now = HydrusData.GetNow()
        if key not in cache:
            cache[ key ] = ( now, parse( key ) )
        ( last_accessed, parsed_object ) = cache[ key ]
        if last_accessed != now:
            cache[ key ] = ( now, parsed_object )
        if len( cache ) > 10:
            self._CleanCache()
        return parsed_object
    
    def CleanCache( self ):
        with self._lock:
            self._CleanCache()
    
    def GetJSON( self, json_text ):
        """Return the parsed JSON for this text, briefly cached for reuse."""
        with self._lock:
            return self._GetFromCache( self._json_to_jsons, json_text, json.loads )
    
    def GetSoup( self, html ):
        """Return the BeautifulSoup for this html, briefly cached for reuse."""
        with self._lock:
            return self._GetFromCache( self._html_to_soups, html, ClientParsing.GetSoup )
class RenderedImageCache( object ):
    """Caches ImageRenderers for full-size image viewing, keyed by file hash."""
    
    def __init__( self, controller ):
        self._controller = controller
        cache_size = self._controller.options[ 'fullscreen_cache_size' ]
        cache_timeout = self._controller.new_options.GetInteger( 'image_cache_timeout' )
        self._data_cache = DataCache( self._controller, cache_size, timeout = cache_timeout )
    
    def Clear( self ):
        self._data_cache.Clear()
    
    def GetImageRenderer( self, media ):
        # reuse a cached renderer when we have one, otherwise build and cache it
        key = media.GetHash()
        cached_renderer = self._data_cache.GetIfHasData( key )
        if cached_renderer is not None:
            return cached_renderer
        fresh_renderer = ClientRendering.ImageRenderer( media )
        self._data_cache.AddData( key, fresh_renderer )
        return fresh_renderer
    
    def HasImageRenderer( self, hash ):
        return self._data_cache.HasData( hash )
class ThumbnailCache( object ):
    def __init__( self, controller ):
        # Cache of thumbnail bitmaps, filled lazily by a background 'waterfall'
        # thread and keyed by file hash.
        self._controller = controller
        cache_size = self._controller.options[ 'thumbnail_cache_size' ]
        cache_timeout = self._controller.new_options.GetInteger( 'thumbnail_cache_timeout' )
        self._data_cache = DataCache( self._controller, cache_size, timeout = cache_timeout )
        self._lock = threading.Lock()
        # quick set gives O(1) membership tests; random list is the work order
        self._waterfall_queue_quick = set()
        self._waterfall_queue_random = []
        self._waterfall_event = threading.Event()
        # stand-in bitmaps for special filetypes (pdf, audio, ...), built by Clear()
        self._special_thumbs = {}
        self.Clear()
        self._controller.CallToThreadLongRunning( self.DAEMONWaterfall )
        self._controller.sub( self, 'Clear', 'thumbnail_resize' )
        self._controller.sub( self, 'ClearThumbnails', 'clear_thumbnails' )
def _GetResizedHydrusBitmapFromHardDrive( self, display_media ):
thumbnail_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
if tuple( thumbnail_dimensions ) == HC.UNSCALED_THUMBNAIL_DIMENSIONS:
full_size = True
else:
full_size = False
hash = display_media.GetHash()
mime = display_media.GetMime()
locations_manager = display_media.GetLocationsManager()
try:
if full_size:
path = self._controller.client_files_manager.GetFullSizeThumbnailPath( hash, mime )
else:
path = self._controller.client_files_manager.GetResizedThumbnailPath( hash, mime )
except HydrusExceptions.FileMissingException as e:
if locations_manager.IsLocal():
HydrusData.ShowException( e )
return self._special_thumbs[ 'hydrus' ]
mime = display_media.GetMime()
try:
hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path, mime )
except Exception as e:
try:
self._controller.client_files_manager.RegenerateResizedThumbnail( hash, mime )
try:
hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path, mime )
except Exception as e:
HydrusData.ShowException( e )
raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was broken. It was regenerated, but the new file would not render for the above reason. Please inform the hydrus developer what has happened.' )
except Exception as e:
HydrusData.ShowException( e )
return self._special_thumbs[ 'hydrus' ]
( media_x, media_y ) = display_media.GetResolution()
( actual_x, actual_y ) = hydrus_bitmap.GetSize()
( desired_x, desired_y ) = self._controller.options[ 'thumbnail_dimensions' ]
too_large = actual_x > desired_x or actual_y > desired_y
small_original_image = actual_x == media_x and actual_y == media_y
too_small = actual_x < desired_x and actual_y < desired_y
if too_large or ( too_small and not small_original_image ):
self._controller.client_files_manager.RegenerateResizedThumbnail( hash, mime )
hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path, mime )
return hydrus_bitmap
def _RecalcWaterfallQueueRandom( self ):
    """Rebuild the ordered waterfall list from the quick-lookup set."""
    
    # here we sort by the hash since this is both breddy random and more likely to access faster on a well defragged hard drive!
    
    new_queue = list( self._waterfall_queue_quick )
    
    new_queue.sort( key = lambda pair: pair[1].GetDisplayMedia().GetHash() )
    
    self._waterfall_queue_random = new_queue
def CancelWaterfall( self, page_key, medias ):
    """Remove the given medias (for this page) from the pending waterfall queue."""
    
    with self._lock:
        
        cancelled = { ( page_key, media ) for media in medias }
        
        self._waterfall_queue_quick.difference_update( cancelled )
        
        self._RecalcWaterfallQueueRandom()
def Clear( self ):
    """Drop every cached thumbnail and rebuild the special placeholder thumbs."""
    
    with self._lock:
        
        self._data_cache.Clear()
        
        self._special_thumbs = {}
        
        ( os_file_handle, temp_path ) = ClientPaths.GetTempPath()
        
        try:
            
            # render each static placeholder png at the current thumbnail size
            for name in ( 'hydrus', 'pdf', 'audio', 'video', 'zip' ):
                
                path = os.path.join( HC.STATIC_DIR, name + '.png' )
                
                thumbnail_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
                
                thumbnail = HydrusFileHandling.GenerateThumbnailFromStaticImage( path, thumbnail_dimensions, HC.IMAGE_PNG )
                
                with open( temp_path, 'wb' ) as f:
                    
                    f.write( thumbnail )
                    
                
                self._special_thumbs[ name ] = ClientRendering.GenerateHydrusBitmap( temp_path, HC.IMAGE_PNG )
                
            
        finally:
            
            HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )
def ClearThumbnails( self, hashes ):
    """Evict the cached thumbnail bitmaps for the given hashes."""
    
    with self._lock:
        
        for thumbnail_hash in hashes:
            
            self._data_cache.DeleteData( thumbnail_hash )
def DoingWork( self ):
    """True if any thumbnails are still queued for waterfall rendering."""
    
    with self._lock:
        
        return bool( self._waterfall_queue_random )
def GetThumbnail( self, media ):
    """Return a thumbnail bitmap for the media, consulting and filling the cache.
    
    Mimes without real thumbnails, and media that cannot currently provide
    one, get one of the preloaded special placeholder thumbs instead.
    """
    
    try:
        
        display_media = media.GetDisplayMedia()
        
    except:
        
        # sometimes media can get switched around during a collect event, and if this happens during waterfall, we have a problem here
        # just return for now, we'll see how it goes
        
        return self._special_thumbs[ 'hydrus' ]
        
    
    locations_manager = display_media.GetLocationsManager()
    
    if locations_manager.ShouldIdeallyHaveThumbnail():
        
        mime = display_media.GetMime()
        
        if mime in HC.MIMES_WITH_THUMBNAILS:
            
            hash = display_media.GetHash()
            
            result = self._data_cache.GetIfHasData( hash )
            
            if result is None:
                
                # cache miss: load from disk and remember the bitmap
                
                if locations_manager.ShouldDefinitelyHaveThumbnail():
                    
                    # local file, should be able to regen if needed
                    
                    hydrus_bitmap = self._GetResizedHydrusBitmapFromHardDrive( display_media )
                    
                else:
                    
                    # repository file, maybe not actually available yet
                    
                    try:
                        
                        hydrus_bitmap = self._GetResizedHydrusBitmapFromHardDrive( display_media )
                        
                    except:
                        
                        hydrus_bitmap = self._special_thumbs[ 'hydrus' ]
                        
                    
                
                self._data_cache.AddData( hash, hydrus_bitmap )
                
            else:
                
                hydrus_bitmap = result
                
            
            return hydrus_bitmap
            
        elif mime in HC.AUDIO: return self._special_thumbs[ 'audio' ]
        elif mime in HC.VIDEO: return self._special_thumbs[ 'video' ]
        elif mime == HC.APPLICATION_PDF: return self._special_thumbs[ 'pdf' ]
        elif mime in HC.ARCHIVES: return self._special_thumbs[ 'zip' ]
        else: return self._special_thumbs[ 'hydrus' ]
        
    else:
        
        return self._special_thumbs[ 'hydrus' ]
def HasThumbnailCached( self, media ):
    """True if the media's thumbnail is already cached (or it needs no real one)."""
    
    display_media = media.GetDisplayMedia()
    
    if display_media.GetMime() not in HC.MIMES_WITH_THUMBNAILS:
        
        # mimes without real thumbnails are served from the always-loaded special thumbs
        return True
        
    
    return self._data_cache.HasData( display_media.GetHash() )
def Waterfall( self, page_key, medias ):
    """Queue the medias' thumbnails for background rendering and wake the daemon."""
    
    with self._lock:
        
        incoming = ( ( page_key, media ) for media in medias )
        
        self._waterfall_queue_quick.update( incoming )
        
        self._RecalcWaterfallQueueRandom()
        
    
    self._waterfall_event.set()
def DAEMONWaterfall( self ):
    """Background thread: renders queued thumbnails in short time-sliced bursts.
    
    Sleeps on the waterfall event while the queue is empty, then works for
    roughly one display frame at a time before publishing results per page.
    """
    
    last_paused = HydrusData.GetNowPrecise()
    
    while not HydrusThreading.IsThreadShuttingDown():
        
        with self._lock:
            
            do_wait = len( self._waterfall_queue_random ) == 0
            
        
        if do_wait:
            
            # block (up to 1s) until new work arrives
            self._waterfall_event.wait( 1 )
            
            self._waterfall_event.clear()
            
            last_paused = HydrusData.GetNowPrecise()
            
        
        start_time = HydrusData.GetNowPrecise()
        stop_time = start_time + 0.005 # a bit of a typical frame
        
        page_keys_to_rendered_medias = collections.defaultdict( list )
        
        while not HydrusData.TimeHasPassedPrecise( stop_time ):
            
            with self._lock:
                
                if len( self._waterfall_queue_random ) == 0:
                    
                    break
                    
                
                result = self._waterfall_queue_random.pop()
                
                self._waterfall_queue_quick.discard( result )
                
            
            ( page_key, media ) = result
            
            try:
                
                self.GetThumbnail( media ) # to load it
                
                page_keys_to_rendered_medias[ page_key ].append( media )
                
            except Exception as e:
                
                HydrusData.ShowException( e )
                
            
        
        for ( page_key, rendered_medias ) in page_keys_to_rendered_medias.items():
            
            self._controller.pub( 'waterfall_thumbnails', page_key, rendered_medias )
            
        
        time.sleep( 0.00001 )
class ServicesManager( object ):
    """Thread-safe in-memory registry of the client's services, refreshed from the db.
    
    Fix/modernisation: the name sort now uses a key function instead of the
    deprecated cmp/compare_function, and the filters use list comprehensions
    instead of filter(); ordering and results are unchanged.
    """
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._lock = threading.Lock()
        
        self._keys_to_services = {}
        self._services_sorted = []
        
        self.RefreshServices()
        
        self._controller.sub( self, 'RefreshServices', 'notify_new_services_data' )
        
    
    def _GetService( self, service_key ):
        
        # caller must hold self._lock
        
        try:
            
            return self._keys_to_services[ service_key ]
            
        except KeyError:
            
            raise HydrusExceptions.DataMissing( 'That service was not found!' )
            
        
    
    def _SetServices( self, services ):
        
        self._keys_to_services = { service.GetServiceKey() : service for service in services }
        
        # the test service is always reachable by key, but is not listed in the sorted services
        self._keys_to_services[ CC.TEST_SERVICE_KEY ] = ClientServices.GenerateService( CC.TEST_SERVICE_KEY, HC.TEST_SERVICE, CC.TEST_SERVICE_KEY )
        
        self._services_sorted = sorted( services, key = lambda service: service.GetName() )
        
    
    def Filter( self, service_keys, desired_types ):
        """Return the given keys whose services are of one of the desired types."""
        
        with self._lock:
            
            return [ service_key for service_key in service_keys if self._keys_to_services[ service_key ].GetServiceType() in desired_types ]
            
        
    
    def FilterValidServiceKeys( self, service_keys ):
        """Return the given keys that correspond to a known service."""
        
        with self._lock:
            
            return [ service_key for service_key in service_keys if service_key in self._keys_to_services ]
            
        
    
    def GetName( self, service_key ):
        
        with self._lock:
            
            service = self._GetService( service_key )
            
            return service.GetName()
            
        
    
    def GetService( self, service_key ):
        
        with self._lock:
            
            return self._GetService( service_key )
            
        
    
    def GetServiceType( self, service_key ):
        
        with self._lock:
            
            return self._GetService( service_key ).GetServiceType()
            
        
    
    def GetServiceKeys( self, desired_types = HC.ALL_SERVICES ):
        """Return the keys of all known services of the desired types."""
        
        with self._lock:
            
            return [ service_key for ( service_key, service ) in self._keys_to_services.items() if service.GetServiceType() in desired_types ]
            
        
    
    def GetServices( self, desired_types = HC.ALL_SERVICES, randomised = True ):
        """Return services of the desired types, name-sorted or shuffled."""
        
        with self._lock:
            
            services = [ service for service in self._services_sorted if service.GetServiceType() in desired_types ]
            
            if randomised:
                
                random.shuffle( services )
                
            
            return services
            
        
    
    def RefreshServices( self ):
        """Reload the service list from the db."""
        
        with self._lock:
            
            services = self._controller.Read( 'services' )
            
            self._SetServices( services )
            
        
    
    def ServiceExists( self, service_key ):
        
        with self._lock:
            
            return service_key in self._keys_to_services
class ShortcutsManager( object ):
    """Caches the user's named shortcut sets and resolves shortcuts to commands."""
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._shortcuts = {}
        
        self.RefreshShortcuts()
        
        self._controller.sub( self, 'RefreshShortcuts', 'new_shortcuts' )
        
    
    def GetCommand( self, shortcuts_names, shortcut ):
        """Return the first command matching the shortcut, searching the named sets in order; None if no match."""
        
        for name in shortcuts_names:
            
            shortcut_set = self._shortcuts.get( name )
            
            if shortcut_set is None:
                
                continue
                
            
            command = shortcut_set.GetCommand( shortcut )
            
            if command is None:
                
                continue
                
            
            if HG.gui_report_mode:
                
                HydrusData.ShowText( 'command matched: ' + repr( command ) )
                
            
            return command
            
        
        return None
        
    
    def RefreshShortcuts( self ):
        """Reload every named shortcut set from the db."""
        
        all_shortcuts = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUTS )
        
        self._shortcuts = { shortcuts.GetName() : shortcuts for shortcuts in all_shortcuts }
class TagCensorshipManager( object ):
    """Filters tags/predicates/pairs according to per-service censorship rules.
    
    Each service maps to ( blacklist, censorships ); rules registered for
    the combined tag service, when present, are applied to every lookup too.
    """
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self.RefreshData()
        
        self._controller.sub( self, 'RefreshData', 'notify_new_tag_censorship' )
        
    
    def _CensorshipMatches( self, tag, blacklist, censorships ):
        
        # True if the tag survives the filter: a blacklist removes matching
        # tags, otherwise only matching tags are kept
        
        if blacklist:
            
            return not HydrusTags.CensorshipMatch( tag, censorships )
            
        else:
            
            return HydrusTags.CensorshipMatch( tag, censorships )
            
        
    
    def GetInfo( self, service_key ):
        
        # default is ( blacklist = True, no rules ), i.e. everything passes
        
        if service_key in self._service_keys_to_info: return self._service_keys_to_info[ service_key ]
        else: return ( True, set() )
        
    
    def RefreshData( self ):
        
        rows = self._controller.Read( 'tag_censorship' )
        
        self._service_keys_to_info = { service_key : ( blacklist, censorships ) for ( service_key, blacklist, censorships ) in rows }
        
    
    def FilterPredicates( self, service_key, predicates ):
        """Drop tag predicates that fail censorship for the service (and combined)."""
        
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            
            if service_key_lookup in self._service_keys_to_info:
                
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                
                predicates = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG or self._CensorshipMatches( predicate.GetValue(), blacklist, censorships ) ]
                
            
        
        return predicates
        
    
    def FilterStatusesToPairs( self, service_key, statuses_to_pairs ):
        """Drop tag pairs where either side fails censorship."""
        
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            
            if service_key_lookup in self._service_keys_to_info:
                
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                
                new_statuses_to_pairs = HydrusData.default_dict_set()
                
                for ( status, pairs ) in statuses_to_pairs.items():
                    
                    new_statuses_to_pairs[ status ] = { ( one, two ) for ( one, two ) in pairs if self._CensorshipMatches( one, blacklist, censorships ) and self._CensorshipMatches( two, blacklist, censorships ) }
                    
                
                statuses_to_pairs = new_statuses_to_pairs
                
            
        
        return statuses_to_pairs
        
    
    def FilterServiceKeysToStatusesToTags( self, service_keys_to_statuses_to_tags ):
        """Apply combined rules to every service, then each service's own rules.
        
        Mutates the inner statuses_to_tags dicts in place and returns the input.
        """
        
        if CC.COMBINED_TAG_SERVICE_KEY in self._service_keys_to_info:
            
            ( blacklist, censorships ) = self._service_keys_to_info[ CC.COMBINED_TAG_SERVICE_KEY ]
            
            service_keys = service_keys_to_statuses_to_tags.keys()
            
            for service_key in service_keys:
                
                statuses_to_tags = service_keys_to_statuses_to_tags[ service_key ]
                
                statuses = statuses_to_tags.keys()
                
                for status in statuses:
                    
                    tags = statuses_to_tags[ status ]
                    
                    statuses_to_tags[ status ] = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
                    
                
            
        
        for ( service_key, ( blacklist, censorships ) ) in self._service_keys_to_info.items():
            
            if service_key == CC.COMBINED_TAG_SERVICE_KEY:
                
                continue
                
            
            if service_key in service_keys_to_statuses_to_tags:
                
                statuses_to_tags = service_keys_to_statuses_to_tags[ service_key ]
                
                statuses = statuses_to_tags.keys()
                
                for status in statuses:
                    
                    tags = statuses_to_tags[ status ]
                    
                    statuses_to_tags[ status ] = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
                    
                
            
        
        return service_keys_to_statuses_to_tags
        
    
    def FilterTags( self, service_key, tags ):
        """Return only the tags that survive censorship for the service (and combined)."""
        
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            
            if service_key_lookup in self._service_keys_to_info:
                
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                
                tags = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
                
            
        
        return tags
class TagParentsManager( object ):
    """Caches the child-tag -> parent-tags mapping per service.
    
    Built from the db 'tag_parents' read, with siblings collapsed first.
    Marked dirty on 'notify_new_parents' and refreshed shortly after.
    """
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._dirty = False
        
        self._service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
        
        # first refresh runs before the lock exists; later refreshes go
        # through RefreshParentsIfDirty, which holds self._lock
        self._RefreshParents()
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'NotifyNewParents', 'notify_new_parents' )
        
    
    def _RefreshParents( self ):
        
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_parents' )
        
        # first collapse siblings
        
        sibling_manager = self._controller.GetManager( 'tag_siblings' )
        
        collapsed_service_keys_to_statuses_to_pairs = collections.defaultdict( HydrusData.default_dict_set )
        
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            
            if service_key == CC.COMBINED_TAG_SERVICE_KEY: continue
            
            for ( status, pairs ) in statuses_to_pairs.items():
                
                pairs = sibling_manager.CollapsePairs( service_key, pairs )
                
                collapsed_service_keys_to_statuses_to_pairs[ service_key ][ status ] = pairs
                
            
        
        # now collapse current and pending
        
        service_keys_to_pairs_flat = HydrusData.default_dict_set()
        
        for ( service_key, statuses_to_pairs ) in collapsed_service_keys_to_statuses_to_pairs.items():
            
            pairs_flat = statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] )
            
            service_keys_to_pairs_flat[ service_key ] = pairs_flat
            
        
        # now create the combined tag service
        
        combined_pairs_flat = set()
        
        for pairs_flat in service_keys_to_pairs_flat.values():
            
            combined_pairs_flat.update( pairs_flat )
            
        
        service_keys_to_pairs_flat[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_pairs_flat
        
        #
        
        service_keys_to_simple_children_to_parents = BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat )
        
        self._service_keys_to_children_to_parents = BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents )
        
    
    def ExpandPredicates( self, service_key, predicates ):
        """Return the predicates plus a parent predicate for each tag predicate's parents."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        results = []
        
        with self._lock:
            
            for predicate in predicates:
                
                results.append( predicate )
                
                if predicate.GetType() == HC.PREDICATE_TYPE_TAG:
                    
                    tag = predicate.GetValue()
                    
                    parents = self._service_keys_to_children_to_parents[ service_key ][ tag ]
                    
                    for parent in parents:
                        
                        parent_predicate = ClientSearch.Predicate( HC.PREDICATE_TYPE_PARENT, parent )
                        
                        results.append( parent_predicate )
                        
                    
                
            
            return results
            
        
    
    def ExpandTags( self, service_key, tags ):
        """Return the tags plus every parent of each tag."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            tags_results = set( tags )
            
            for tag in tags:
                
                tags_results.update( self._service_keys_to_children_to_parents[ service_key ][ tag ] )
                
            
            return tags_results
            
        
    
    def GetParents( self, service_key, tag ):
        
        if self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            return self._service_keys_to_children_to_parents[ service_key ][ tag ]
            
        
    
    def NotifyNewParents( self ):
        
        with self._lock:
            
            self._dirty = True
            
            self._controller.CallLater( 1.0, self.RefreshParentsIfDirty )
            
        
    
    def RefreshParentsIfDirty( self ):
        
        with self._lock:
            
            if self._dirty:
                
                self._RefreshParents()
                
                self._dirty = False
class TagSiblingsManager( object ):
    """Caches tag sibling mappings ( bad tag -> preferred tag ) per service.
    
    Reads 'tag_siblings' from the db, collapses the raw pairs into a flat
    lookup per service plus a combined mapping, and keeps a reverse lookup
    ( good tag -> list of bad tags ) for autocomplete. Marked dirty on
    'notify_new_siblings_data' and refreshed shortly after.
    """
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._dirty = False
        
        self._service_keys_to_siblings = collections.defaultdict( dict )
        self._service_keys_to_reverse_lookup = collections.defaultdict( dict )
        
        # first refresh runs before the lock exists; later refreshes go
        # through RefreshSiblingsIfDirty, which holds self._lock
        self._RefreshSiblings()
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'NotifyNewSiblings', 'notify_new_siblings_data' )
        
    
    def _CollapseTags( self, service_key, tags ):
        
        # map each tag to its preferred sibling, if it has one
        
        siblings = self._service_keys_to_siblings[ service_key ]
        
        return { siblings[ tag ] if tag in siblings else tag for tag in tags }
        
    
    def _RefreshSiblings( self ):
        
        self._service_keys_to_siblings = collections.defaultdict( dict )
        self._service_keys_to_reverse_lookup = collections.defaultdict( dict )
        
        local_tags_pairs = set()
        
        tag_repo_pairs = set()
        
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_siblings' )
        
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            
            # pending pairs are treated the same as current ones
            all_pairs = statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] )
            
            if service_key == CC.LOCAL_TAG_SERVICE_KEY:
                
                local_tags_pairs = set( all_pairs )
                
            else:
                
                tag_repo_pairs.update( all_pairs )
                
            
            siblings = CollapseTagSiblingPairs( [ all_pairs ] )
            
            self._service_keys_to_siblings[ service_key ] = siblings
            
            reverse_lookup = collections.defaultdict( list )
            
            for ( bad, good ) in siblings.items():
                
                reverse_lookup[ good ].append( bad )
                
            
            self._service_keys_to_reverse_lookup[ service_key ] = reverse_lookup
            
        
        # the combined service collapses local tags and all tag repos together
        
        combined_siblings = CollapseTagSiblingPairs( [ local_tags_pairs, tag_repo_pairs ] )
        
        self._service_keys_to_siblings[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_siblings
        
        combined_reverse_lookup = collections.defaultdict( list )
        
        for ( bad, good ) in combined_siblings.items():
            
            combined_reverse_lookup[ good ].append( bad )
            
        
        self._service_keys_to_reverse_lookup[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_reverse_lookup
        
        self._controller.pub( 'new_siblings_gui' )
        
    
    def CollapsePredicates( self, service_key, predicates ):
        """Collapse tag predicates to their preferred siblings, merging counts for duplicates."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            results = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG ]
            
            tag_predicates = [ predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG ]
            
            tags_to_predicates = { predicate.GetValue() : predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG }
            
            tags = tags_to_predicates.keys()
            
            tags_to_include_in_results = set()
            
            for tag in tags:
                
                if tag in siblings:
                    
                    old_tag = tag
                    old_predicate = tags_to_predicates[ old_tag ]
                    
                    new_tag = siblings[ old_tag ]
                    
                    if new_tag not in tags_to_predicates:
                        
                        ( old_pred_type, old_value, old_inclusive ) = old_predicate.GetInfo()
                        
                        new_predicate = ClientSearch.Predicate( old_pred_type, new_tag, old_inclusive )
                        
                        tags_to_predicates[ new_tag ] = new_predicate
                        
                        tags_to_include_in_results.add( new_tag )
                        
                    
                    # fold the old predicate's counts into the collapsed one
                    new_predicate = tags_to_predicates[ new_tag ]
                    
                    new_predicate.AddCounts( old_predicate )
                    
                else:
                    
                    tags_to_include_in_results.add( tag )
                    
                
            
            results.extend( [ tags_to_predicates[ tag ] for tag in tags_to_include_in_results ] )
            
            return results
            
        
    
    def CollapsePairs( self, service_key, pairs ):
        """Collapse both sides of each pair to their preferred siblings."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            result = set()
            
            for ( a, b ) in pairs:
                
                if a in siblings:
                    
                    a = siblings[ a ]
                    
                
                if b in siblings:
                    
                    b = siblings[ b ]
                    
                
                result.add( ( a, b ) )
                
            
            return result
            
        
    
    def CollapseStatusesToTags( self, service_key, statuses_to_tags ):
        """Collapse every tag under every status to its preferred sibling."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            statuses = statuses_to_tags.keys()
            
            new_statuses_to_tags = HydrusData.default_dict_set()
            
            for status in statuses:
                
                new_statuses_to_tags[ status ] = self._CollapseTags( service_key, statuses_to_tags[ status ] )
                
            
            return new_statuses_to_tags
            
        
    
    def CollapseTag( self, service_key, tag ):
        """Return the tag's preferred sibling, or the tag itself if it has none."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            if tag in siblings:
                
                return siblings[ tag ]
                
            else:
                
                return tag
                
            
        
    
    def CollapseTags( self, service_key, tags ):
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            return self._CollapseTags( service_key, tags )
            
        
    
    def CollapseTagsToCount( self, service_key, tags_to_count ):
        """Collapse tags to preferred siblings, summing counts that merge."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            results = collections.Counter()
            
            for ( tag, count ) in tags_to_count.items():
                
                if tag in siblings:
                    
                    tag = siblings[ tag ]
                    
                
                results[ tag ] += count
                
            
            return results
            
        
    
    def GetAutocompleteSiblings( self, service_key, search_text, exact_match = False ):
        """Return every tag in any sibling network that matches search_text."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            reverse_lookup = self._service_keys_to_reverse_lookup[ service_key ]
            
            if exact_match:
                
                key_based_matching_values = set()
                
                if search_text in siblings:
                    
                    key_based_matching_values = { siblings[ search_text ] }
                    
                else:
                    
                    key_based_matching_values = set()
                    
                
                value_based_matching_values = { value for value in siblings.values() if value == search_text }
                
            else:
                
                matching_keys = ClientSearch.FilterTagsBySearchText( service_key, search_text, siblings.keys(), search_siblings = False )
                
                key_based_matching_values = { siblings[ key ] for key in matching_keys }
                
                value_based_matching_values = ClientSearch.FilterTagsBySearchText( service_key, search_text, siblings.values(), search_siblings = False )
                
            
            matching_values = key_based_matching_values.union( value_based_matching_values )
            
            # all the matching values have a matching sibling somewhere in their network
            # so now fetch the networks
            
            lists_of_matching_keys = [ reverse_lookup[ value ] for value in matching_values ]
            
            matching_keys = itertools.chain.from_iterable( lists_of_matching_keys )
            
            matches = matching_values.union( matching_keys )
            
            return matches
            
        
    
    def GetSibling( self, service_key, tag ):
        """Return the tag's preferred sibling, or None if it has none."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            if tag in siblings:
                
                return siblings[ tag ]
                
            else:
                
                return None
                
            
        
    
    def GetAllSiblings( self, service_key, tag ):
        """Return the tag's whole sibling network (including the tag itself)."""
        
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            reverse_lookup = self._service_keys_to_reverse_lookup[ service_key ]
            
            if tag in siblings:
                
                best_tag = siblings[ tag ]
                
            elif tag in reverse_lookup:
                
                best_tag = tag
                
            else:
                
                # not part of any sibling network
                return [ tag ]
                
            
            all_siblings = list( reverse_lookup[ best_tag ] )
            
            all_siblings.append( best_tag )
            
            return all_siblings
            
        
    
    def NotifyNewSiblings( self ):
        
        with self._lock:
            
            self._dirty = True
            
            self._controller.CallLater( 1.0, self.RefreshSiblingsIfDirty )
            
        
    
    def RefreshSiblingsIfDirty( self ):
        
        with self._lock:
            
            if self._dirty:
                
                self._RefreshSiblings()
                
                self._dirty = False
class UndoManager( object ):
    """Keeps an undo/redo stack of invertible content-update commands."""
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._commands = []
        self._inverted_commands = []
        # index of the next redo slot; entries before it are undoable
        self._current_index = 0
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'Undo', 'undo' )
        self._controller.sub( self, 'Redo', 'redo' )
        
    
    def _FilterServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        
        # keep only the updates that _InvertServiceKeysToContentUpdates knows
        # how to invert; everything else is dropped from the undo record
        
        filtered_service_keys_to_content_updates = {}
        
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            
            filtered_content_updates = []
            
            for content_update in content_updates:
                
                ( data_type, action, row ) = content_update.ToTuple()
                
                if data_type == HC.CONTENT_TYPE_FILES:
                    
                    if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_UNDELETE, HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ):
                        
                        continue
                        
                    
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    
                    if action in ( HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ):
                        
                        continue
                        
                    
                else:
                    
                    continue
                    
                
                filtered_content_update = HydrusData.ContentUpdate( data_type, action, row )
                
                filtered_content_updates.append( filtered_content_update )
                
            
            if len( filtered_content_updates ) > 0:
                
                filtered_service_keys_to_content_updates[ service_key ] = filtered_content_updates
                
            
        
        return filtered_service_keys_to_content_updates
        
    
    def _InvertServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        
        # assumes the input already passed _FilterServiceKeysToContentUpdates,
        # so every ( data_type, action ) combination here has an inverse
        
        inverted_service_keys_to_content_updates = {}
        
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            
            inverted_content_updates = []
            
            for content_update in content_updates:
                
                ( data_type, action, row ) = content_update.ToTuple()
                
                inverted_row = row
                
                if data_type == HC.CONTENT_TYPE_FILES:
                    
                    if action == HC.CONTENT_UPDATE_ARCHIVE: inverted_action = HC.CONTENT_UPDATE_INBOX
                    elif action == HC.CONTENT_UPDATE_INBOX: inverted_action = HC.CONTENT_UPDATE_ARCHIVE
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION:
                        
                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                        
                        # the rescind does not need the petition reason
                        ( hashes, reason ) = row
                        
                        inverted_row = hashes
                        
                    
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    
                    if action == HC.CONTENT_UPDATE_ADD: inverted_action = HC.CONTENT_UPDATE_DELETE
                    elif action == HC.CONTENT_UPDATE_DELETE: inverted_action = HC.CONTENT_UPDATE_ADD
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION:
                        
                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                        
                        ( tag, hashes, reason ) = row
                        
                        inverted_row = ( tag, hashes )
                        
                    
                
                inverted_content_update = HydrusData.ContentUpdate( data_type, inverted_action, inverted_row )
                
                inverted_content_updates.append( inverted_content_update )
                
            
            inverted_service_keys_to_content_updates[ service_key ] = inverted_content_updates
            
        
        return inverted_service_keys_to_content_updates
        
    
    def AddCommand( self, action, *args, **kwargs ):
        """Record an undoable command; anything past the current index (the redo tail) is discarded."""
        
        with self._lock:
            
            inverted_action = action
            inverted_args = args
            inverted_kwargs = kwargs
            
            if action == 'content_updates':
                
                ( service_keys_to_content_updates, ) = args
                
                service_keys_to_content_updates = self._FilterServiceKeysToContentUpdates( service_keys_to_content_updates )
                
                if len( service_keys_to_content_updates ) == 0: return
                
                inverted_service_keys_to_content_updates = self._InvertServiceKeysToContentUpdates( service_keys_to_content_updates )
                
                if len( inverted_service_keys_to_content_updates ) == 0: return
                
                inverted_args = ( inverted_service_keys_to_content_updates, )
                
            else: return
            
            self._commands = self._commands[ : self._current_index ]
            self._inverted_commands = self._inverted_commands[ : self._current_index ]
            
            self._commands.append( ( action, args, kwargs ) )
            
            self._inverted_commands.append( ( inverted_action, inverted_args, inverted_kwargs ) )
            
            self._current_index += 1
            
            self._controller.pub( 'notify_new_undo' )
            
        
    
    def GetUndoRedoStrings( self ):
        """Return ( undo_string, redo_string ); either may be None when unavailable."""
        
        with self._lock:
            
            ( undo_string, redo_string ) = ( None, None )
            
            if self._current_index > 0:
                
                undo_index = self._current_index - 1
                
                ( action, args, kwargs ) = self._commands[ undo_index ]
                
                if action == 'content_updates':
                    
                    ( service_keys_to_content_updates, ) = args
                    
                    undo_string = 'undo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
                    
                
            
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                
                redo_index = self._current_index
                
                ( action, args, kwargs ) = self._commands[ redo_index ]
                
                if action == 'content_updates':
                    
                    ( service_keys_to_content_updates, ) = args
                    
                    redo_string = 'redo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
                    
                
            
            return ( undo_string, redo_string )
            
        
    
    def Undo( self ):
        
        action = None
        
        with self._lock:
            
            if self._current_index > 0:
                
                self._current_index -= 1
                
                ( action, args, kwargs ) = self._inverted_commands[ self._current_index ]
                
            
        
        # run the db write outside the lock
        if action is not None:
            
            self._controller.WriteSynchronous( action, *args, **kwargs )
            
            self._controller.pub( 'notify_new_undo' )
            
        
    
    def Redo( self ):
        
        action = None
        
        with self._lock:
            
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                
                ( action, args, kwargs ) = self._commands[ self._current_index ]
                
                self._current_index += 1
                
            
        
        # run the db write outside the lock
        if action is not None:
            
            self._controller.WriteSynchronous( action, *args, **kwargs )
            
            self._controller.pub( 'notify_new_undo' )
| [
"hydrus.admin@gmail.com"
] | hydrus.admin@gmail.com |
78b7821eb4c330884d87322bb2e9c99f8af8f6d8 | acf2d43575f4be1fc97d0368073e338188e8bfae | /1_twosum.py | 9383da9effb20171eb6ecb6763371d53564f67f0 | [] | no_license | mingshaofeng/leetCode | 87e6011285168eabdcfad89a445c371aad1d0f46 | 0fbf165d4ff25a879db81c8958c191fa4728701f | refs/heads/master | 2020-12-31T08:47:12.951354 | 2020-02-08T12:26:28 | 2020-02-08T12:26:28 | 238,958,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | '''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
# -*- coding:utf-8 -*-
def twoSum(nums, target):
    """Return indices [i, j] (i < j) of the two entries of nums summing to target."""
    seen = {}
    for index, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], index]
        seen[value] = index
if __name__ == '__main__':
    # quick smoke test of the example from the problem statement
    nums = [2, 7, 11, 15]
    target = 9
    print(twoSum(nums, target))
"1812611764@qq.com"
] | 1812611764@qq.com |
5ba300fb8fe455146525b436819e316a5e780da1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2811/61132/294777.py | 4cbb3ce02f1703d0fb35813ef04ff2bc5e50a6e3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | p,n=map(int,input().split())
l=[]
for i in range(n):
l.append(int(input()))
dic={}
for pos,i in enumerate(l):
key=i%p
if dic.get(key,'')=='':
print(pos+1)
break
else:
dic[key]=i
else:
print(-1) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
5505cd4011c837c9e22cf9e9d81addb8442e050d | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /apps/oozie/src/oozie/migrations/0005_initial.py | 2688a433ed8dcc89995fc5f9b23a9defb2088449 | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] | permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 1,402 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-06 18:55
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header): adds the FK fields for the
    # oozie link/job/history models. Generated code - avoid hand-editing the
    # operations list.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('oozie', '0004_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='link',
            name='child',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent_node', to='oozie.Node', verbose_name=b''),
        ),
        migrations.AddField(
            model_name='link',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child_node', to='oozie.Node'),
        ),
        migrations.AddField(
            model_name='job',
            name='owner',
            field=models.ForeignKey(help_text='Person who can modify the job.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
        migrations.AddField(
            model_name='history',
            name='job',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oozie.Job'),
        ),
    ]
| [
"romain.rigaux@gmail.com"
] | romain.rigaux@gmail.com |
06d58311b1ea626d1a63e334daea32050b9d3781 | 0c06237432abf0ebbcde87bb1c05d290e1400da6 | /app/graph/views.py | 6a26b0596deaa5ed12a913553ee8dc693b17d488 | [] | no_license | stevefusaro/timelight | a03e7bda9d3e4977998a460a2fc277d9f1d00648 | 67783e373dcd549b856b03db37b09fd8d297ad67 | refs/heads/master | 2021-01-11T02:58:56.550641 | 2017-07-07T20:12:37 | 2017-07-07T20:12:37 | 70,871,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | from rest_framework import authentication
from rest_framework.viewsets import ViewSet
from rest_framework.decorators import list_route
from rest_framework.response import Response
from neo4j.v1 import GraphDatabase, basic_auth
driver = GraphDatabase.driver("bolt://localhost:7687", auth=basic_auth("neo4j", "ezpass"), encrypted=False)
def _run_query(query, params=None):
    """Run a Cypher query in its own session/transaction and return all rows.

    params: optional dict of query parameters. Bug fix: the original accepted
    `params` but never forwarded it to the driver, so callers that passed
    parameters (e.g. GraphApi.label_nodes) silently had them dropped.
    """
    params = params or {}
    resp = []
    with driver.session() as session:
        with session.begin_transaction() as tx:
            # Forward the parameters so queries can use `$name` placeholders
            # instead of unsafe string formatting.
            for row in tx.run(query, params):
                resp.append(row)
    return resp
class GraphApi(ViewSet):
    """Read-only REST endpoints over the Neo4j graph."""
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = ()  # no permission checks beyond token authentication
    @list_route(methods=['get'])
    def q(self, request):
        # Fixed demo query: return every Person node.
        query = "MATCH (a:Person) return a"
        return Response(_run_query(query))
    @list_route(methods=['get'])
    def label_nodes(self, request):
        # NOTE(review): `assert` is stripped under `python -O`; request
        # validation should return an HTTP 400 instead.
        label = request.GET.get('label')
        assert label, 'Label is required in GET'
        # NOTE(review): the label is interpolated straight into the Cypher
        # string (labels cannot be parameterized in Cypher) — this is an
        # injection risk for untrusted input; consider whitelisting labels.
        query = "MATCH (person:{label}) RETURN person LIMIT 200".format(label=label)
        rows = _run_query(query, params={'label': label})
        nodes = [row[0].__dict__ for row in rows] # keys: labels, properties, id
        for node in nodes:
            node['labels'] = list(node['labels']) # convert from set
        return Response(nodes)
| [
"sfusaro1@gmail.com"
] | sfusaro1@gmail.com |
90bc1bde27330e94fc612e4da753803e61b9d6f6 | eb5c9aa97ecaded5f68167fc0220539ba3e4b1b7 | /Q28_Implement strStr()_ver2.py | d005f2e4cecbac082c5effc5dc5c19dfeb793a03 | [] | no_license | Annie-Chu/practice-weekly | 27e7b518a08f7a69f07a1c1592050cbad014bddc | 07c7dbbfa1465eae650d3b58e99bbcc2ef211226 | refs/heads/master | 2022-10-20T00:09:35.275016 | 2020-06-11T06:58:08 | 2020-06-11T06:58:08 | 263,295,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | def strStr(haystack: str, needle: str) -> int:
if needle == '':
return 0
elif needle not in haystack:
print(-1)
else:
string = haystack.split(needle)
print(len(string[0]))
if __name__ == '__main__':
strStr("hello", "ll") | [
"anniechu65@gmail.com"
] | anniechu65@gmail.com |
c2a62a2b0eee72815aacb4a39fe389383b1c3109 | d7980f818fa101ca7c1e86df3b5dd39c75a58daf | /swmif/urls.py | c301bff1c785b94101ce3555c727b91927290941 | [] | no_license | HarshSonawane/SmartIF | dbe39e35b3d6ac1e45d1255e1080c9a1dd80f929 | ba91f421e28dad7372c7db503e109cba8c193518 | refs/heads/master | 2022-12-19T01:24:35.082665 | 2020-09-20T06:11:18 | 2020-09-20T06:11:18 | 227,466,838 | 0 | 1 | null | 2020-10-01T06:00:51 | 2019-12-11T21:49:23 | JavaScript | UTF-8 | Python | false | false | 1,257 | py | """swmif URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path('', views.index, name='landing'),
path('user/',include('user.urls')),
path('admin/', admin.site.urls),
path('accounts/',include('allauth.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest/', include('rest.urls')),
]
urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"sonawaneharshwardhan@gmail.com"
] | sonawaneharshwardhan@gmail.com |
3fc4cd0005e2760cac083464668dae41e0b0d4f9 | f8b585a7132343a5da159d32966111fedd39e3d8 | /LogDefer.py | 509ca98e9057c2cf37db6a77a433b52e419dcccd | [] | no_license | mikep/LogDefer | 54e471ca5d607a51547691599af86d44f96aa3b4 | e12de81eba399a433410196605f3b66643dc0833 | refs/heads/master | 2016-09-06T20:07:22.536529 | 2013-12-22T00:50:08 | 2013-12-22T00:50:08 | 15,367,887 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | import json
import os
import re
import time
__version__ = "0.1.0"
class LogDefer(object):
    """
    Generate a log object compatible with log-defer-viz
    https://github.com/hoytech/Log-Defer-Viz

    Collects timestamped log lines, named timers and arbitrary data, then
    serializes everything into a single JSON document via finalize_log().
    """
    def __init__(self, options=None):
        # Bug fix: the previous mutable default (options={}) was shared across
        # every instance. `options` is currently unused but kept for
        # interface compatibility.
        self.levels = (40, 30, 20, 10)
        self.message = {
            'start': time.time(),
            'logs': [],
            'timers': {},
            'data': {},
        }

    def add_message(self, level=30, message="", data=None, *args):
        """Append one log line: [elapsed_time, level, message(, merged data)].

        Bug fix: the default level used to be the string '30' while the
        error/warn/info/debug helpers pass ints; use an int consistently.
        """
        log = [self._get_et(), level, message]
        if data:
            if args:
                # Merge any extra dicts into `data` (later dicts win on key clashes).
                for arg in args:
                    data = dict(list(data.items()) + list(arg.items()))
            log.append(data)
        self.message['logs'].append(log)

    def timer(self, name=None):
        """
        Add a timer to the log object. If the timer already exists, set its
        end time. Returns self so it can be used as a context manager:
            with log.timer('phase'): ...
        """
        self.name = name
        if name and name not in self.message['timers']:
            self.message['timers'][name] = {
                'start': self._get_et(),
                'name': name,
            }
        else:
            self.message['timers'][name]['end'] = self._get_et()
        return self

    def __enter__(self):
        self.timer(self.name)

    def __exit__(self, a, b, c):
        self.timer(self.name)

    def data(self, d=None):
        """Merge dict `d` into the log's free-form data section."""
        if d:
            self.message['data'] = dict(
                list(self.message['data'].items()) + list(d.items())
            )

    def finalize_log(self):
        """Format the collected state and return it as a JSON string."""
        self.__format_log_message_output__()
        return self.__log_message_json__()

    def __format_log_message_output__(self):
        # Clean up: log-defer-viz doesn't like empty objects.
        for key in ('logs', 'timers', 'data'):
            if self.message[key] == [] or self.message[key] == {}:
                del self.message[key]
        # Convert the timers dict to the [[name, start, end], ...] list form.
        if 'timers' in self.message:
            timers = []
            for timer in self.message['timers']:
                timers.append([
                    self.message['timers'][timer]['name'],
                    self.message['timers'][timer]['start'],
                    # A timer that was never stopped ends "now".
                    self.message['timers'][timer].get('end', self._get_et())
                ])
            self.message['timers'] = timers
        # Record end time.
        self.message['end'] = self._get_et()

    def __log_message_json__(self):
        try:
            return json.dumps(self.message)
        except Exception:
            # Bug fix: the bare `except:` clauses here also swallowed
            # SystemExit/KeyboardInterrupt; catch Exception instead.
            def serialize_fix(m):
                # Best effort: recursively replace anything json.dumps can't
                # handle with its str() representation.
                try:
                    for i, x in enumerate(m):
                        try:
                            if type(m) == dict:
                                json.dumps(m[x])
                            else:
                                json.dumps(x)
                        except Exception:
                            if type(m) == dict:
                                m[x] = serialize_fix(m[x])
                            elif type(m) == list:
                                m[i] = serialize_fix(x)
                            else:
                                m[x] = str(x)
                    return m
                except Exception:
                    return str(m)
            return json.dumps(serialize_fix(self.message))

    # Log level functions
    def error(self, message='', data=None, *args):
        self.add_message(10, message, data, *args)

    def warn(self, message='', data=None, *args):
        self.add_message(20, message, data, *args)

    def info(self, message='', data=None, *args):
        self.add_message(30, message, data, *args)

    def debug(self, message='', data=None, *args):
        self.add_message(40, message, data, *args)

    # Util functions
    def _get_et(self):
        """
        log-defer-viz stores elapsed time since `start` in logs and timers.
        """
        return time.time() - self.message['start']
| [
"michael.pucyk@gmail.com"
] | michael.pucyk@gmail.com |
3fce7e6e7ffee44c337a0c83125d2ce5f09a5280 | 49e5c03d59b9b7671f6be1780444cccd6ef14e7e | /maple/utils.py | a2c13c7246640f7c79516edfb77a8839724b6d00 | [] | no_license | isled/backups | e2dd176adaed63d3c41e9a4e9377a012d192976f | 94568f9582dfea2aff9898ef834c8fb583deec87 | refs/heads/master | 2021-07-10T20:15:56.087030 | 2017-10-12T11:02:58 | 2017-10-12T11:02:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | import typing
class MapleError(Exception):
    """
    Base error class for maple.

    All maple error classes derive from this class.
    """
    pass
def ensure_bytes(value: typing.Any) -> bytes:
    """
    Coerce any value to a ``bytes`` object.

    bytes pass through, bytearray is copied, None becomes b"", and anything
    else is stringified and UTF-8 encoded.

    :param value: any type
    :return: bytes
    """
    if isinstance(value, bytes):
        return value
    if isinstance(value, bytearray):
        return bytes(value)
    if value is None:
        return b""
    text = value if isinstance(value, str) else str(value)
    return text.encode('utf-8')
def ensure_str(value: typing.Any) -> str:
    """
    Coerce any value to a ``str`` object.

    str passes through, None becomes '', bytes/bytearray are UTF-8 decoded,
    and anything else is stringified.

    :param value: any type
    :return: str
    """
    if isinstance(value, str):
        return value
    if value is None:
        return ''
    if isinstance(value, (bytes, bytearray)):
        return value.decode('utf-8')
    return str(value)
| [
"ymkwfn1688@qq.com"
] | ymkwfn1688@qq.com |
b2f8bcc16221469eed37b0ab56a7404cf8dc0a34 | 3d709e8a007a8c46c8ece0b63407551a06a47bf1 | /app/accounts/__init__.py | 9bbf50c3aafc7efb11440d6c8e8559e6b65cab46 | [] | no_license | lassilaiho/recipe-book | 4f74422790ed9bb7d810d1fdff1ebf2c0610a6c9 | 379dd7b52ef15272dfdba24e668f4ddc7fc896b3 | refs/heads/master | 2023-06-21T22:37:29.983415 | 2021-07-24T13:24:24 | 2021-07-24T13:24:24 | 233,632,351 | 0 | 0 | null | 2021-03-20T02:44:35 | 2020-01-13T15:49:03 | HTML | UTF-8 | Python | false | false | 172 | py | from app.main import login_manager
from app.accounts.models import Account
@login_manager.user_loader
def load_user(account_id):
    # Flask-Login callback: reload the Account for the id stored in the session.
    return Account.query.get(account_id)
| [
"lassi.laiho0@gmail.com"
] | lassi.laiho0@gmail.com |
e400e3f7cfee1b0808a278fe8e94120ceb12437e | 692b907d07eee8ce3ee32a1fda74b6d92fd6c548 | /tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py | 4f3ce36a7f1b34bd26fe19e07e1dc62094323ae1 | [
"MIT"
] | permissive | AltusConsulting/dnacentercli | 04c9c7d00b25753a26c643994388dd4e23bf4c54 | 26ea46fdbd40fc30649ea1d8803158655aa545aa | refs/heads/master | 2022-12-16T04:50:30.076420 | 2020-07-17T22:12:39 | 2020-07-17T22:12:39 | 212,206,213 | 0 | 0 | MIT | 2022-12-08T06:39:49 | 2019-10-01T21:50:42 | Python | UTF-8 | Python | false | false | 2,451 | py | # -*- coding: utf-8 -*-
"""DNA Center Get Site Count data model.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD9BdB9034Df99Dba(object):
    """Get Site Count request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorD9BdB9034Df99Dba, self).__init__()
        # Compile the JSON schema once at construction; the .replace() strips
        # the 16-space indentation that the triple-quoted literal carries.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "properties": {
                "response": {
                "description":
                "Response",
                "type": [
                "string",
                "null"
                ]
                },
                "version": {
                "description":
                "Version",
                "type": [
                "string",
                "null"
                ]
                }
                },
                "type": "object"
            }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        # Raise MalformedRequest when `request` does not satisfy the schema.
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
dd3c8aa9ff1f411e97b2efef7666a067449b8770 | cee089aebd43dabcdc073fc4c1f3ec38bccd2a91 | /Decison-Tree/Naive Decision Tree/decisionTree.py | 9882f7f79be1f2bffea793117203b790748e8306 | [] | no_license | ramarvab/DataMining_FinalProject | 73f82438c87becfbbf5b5a21734dc6dc2b475873 | 28880b696a154fa0e263d04ac2c07436c118af96 | refs/heads/master | 2020-09-16T04:56:09.843205 | 2016-09-07T19:19:58 | 2016-09-07T19:19:58 | 67,636,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,098 | py | import numpy
import math
class tree_node(object):
    """One node of a binary decision tree.

    Internal nodes route a sample to `left` when sample[feature] <= value and
    to `right` otherwise; leaves (leaf == 1) carry the prediction in `category`.
    """
    def __init__(self):
        self.leaf, self.category = 0, -1   # leaf flag / predicted class
        self.feature, self.value = 0, -1   # split attribute index / threshold
        self.left = self.right = None      # child subtrees
def get_entropy(catlist, categories):
    """Shannon entropy (natural log) of the class labels in `catlist`.

    `categories` is the total number of distinct classes; returns 0 for an
    empty list.
    """
    n = len(catlist)
    if n == 0:
        return 0
    counts = [0] * categories
    for label in catlist:
        counts[int(label)] += 1
    entropy = 0.0
    for count in counts:
        p = float(count) / float(n)
        if p != 0:
            entropy -= p * math.log(p)
    return entropy
def get_gini(catlist, categories):
    """Gini impurity of the class labels in `catlist`.

    Computed as 1 - sum(count_i^2) / n^2; returns 0 for an empty list.
    """
    n = len(catlist)
    if n == 0:
        return 0
    counts = [0] * categories
    for label in catlist:
        counts[int(label)] += 1
    sum_sq = sum(c * c for c in counts)
    return 1.0 - float(sum_sq) / float(n * n)
def get_values(data, attr, points_index):
    """Return the sorted distinct values of feature `attr` over the rows in
    `points_index` — the candidate split thresholds for that attribute.

    Cleanup: the original also computed an unused `parts` subsampling factor,
    sorted the list twice, and carried commented-out experiments; a single
    sorted(set(...)) is equivalent.
    """
    return sorted(set(data[i][attr] for i in points_index))
def get_mandv(data, features, attr, points_index, method, categories):
    """Find the best split threshold for attribute `attr`.

    Returns (measure, value): the lowest weighted child impurity across all
    candidate thresholds and the threshold achieving it. A measure of 1000 is
    a sentinel meaning every candidate left one side empty.
    """
    values = get_values(data, attr, points_index)
    mandv = []
    for value in values:
        # Class labels (stored in column `features`) on each side of the split.
        l_set = [data[pt][features] for pt in points_index if data[pt][attr] <= value]
        r_set = [data[pt][features] for pt in points_index if data[pt][attr] > value]
        left_len = len(l_set)
        right_len = len(r_set)
        major = 0
        if left_len == 0 or right_len == 0:
            # Degenerate split: mark with a large sentinel cost.
            major = 1000
        else:
            if method == "g":
                major = get_gini(r_set, categories)*len(r_set) + get_gini(l_set, categories)*len(l_set)
            if method == "i":
                # NOTE(review): this mixes entropy (right side) with Gini
                # (left side); get_entropy on both sides was probably intended.
                major = get_entropy(r_set, categories)*len(r_set) + get_gini(l_set, categories)*len(l_set)
            # Weight by partition size to get the average child impurity.
            major = float(major)/float(len(points_index))
        mandv.append([major, value])
    mandv.sort(key=lambda x:x[0])
    return mandv[0][0], mandv[0][1]
def best_attr(data, features, points_index, method, categories):
    """Pick the attribute/threshold pair with the lowest split impurity.

    Returns (attr_index, threshold), or (-1, -1) when no split improves on
    the parent partition's impurity.
    """
    p_set = [data[pt][features] for pt in points_index]
    # Impurity of the unsplit (parent) partition.
    if method == "g":
        p_measure = get_gini(p_set, categories)
    else:
        p_measure = get_entropy(p_set, categories)
    attr_data = []
    for i in range(features):
        major, val = get_mandv(data, features, i, points_index, method, categories)
        # Keep only attributes whose best split actually reduces impurity.
        if p_measure > major:
            attr_data.append([i, major, val])
    attr_data.sort(key=lambda x: x[1], reverse=False)
    if len(attr_data) == 0:
        return -1, -1
    if attr_data[0][1] == 1000:
        # 1000 is get_mandv's sentinel for "every candidate split was degenerate".
        return -1, -1
    return attr_data[0][0], attr_data[0][2]
def decision_tree(data, features, points_index, method, categories):
    """Recursively build a decision tree over the rows listed in points_index.

    data: matrix whose rows hold `features` attribute values followed by the
          class label at index `features`.
    method: "g" selects Gini impurity; anything else selects entropy.
    Returns the root tree_node of the (sub)tree.
    """
    # Generated new node
    node = tree_node()
    # Count the class labels present in this partition.
    distinct_categories = set()
    cat_list = [0]*categories
    for i in points_index:
        distinct_categories.add(data[i][features])
        cat_list[int(data[i][features])] += 1
    distinct_categories = list(distinct_categories)
    # Pure partition: make a leaf predicting the single remaining class.
    if len(distinct_categories) == 1:
        node.leaf = 1
        node.category = distinct_categories[0]
        return node
    attr, value = best_attr(data, features, points_index, method, categories)
    if attr == -1:
        # No attribute improves the split: predict the majority class.
        node.leaf = 1
        # Bug fix: the original did `cat_list = zip(...); cat_list.sort(...)`,
        # which only works on Python 2 where zip() returns a list. sorted()
        # over the zip works on both Python 2 and 3.
        ranked = sorted(zip(cat_list, range(categories)), key=lambda x: x[0], reverse=True)
        node.category = ranked[0][1]
        return node
    node.leaf = 0
    node.feature = attr
    node.value = value
    # Partition the rows on the chosen threshold.
    right_pt, left_pt = [], []
    for i in points_index:
        if data[i][attr] <= value:
            left_pt.append(i)
        else:
            right_pt.append(i)
    l_len = len(left_pt)
    r_len = len(right_pt)
    if l_len != 0 and r_len != 0:
        node.left = decision_tree(data, features, left_pt, method, categories)
        node.right = decision_tree(data, features, right_pt, method, categories)
    elif l_len == 0:
        # One-sided split: skip this node and recurse on the non-empty side.
        node = decision_tree(data, features, right_pt, method, categories)
    elif r_len == 0:
        node = decision_tree(data, features, left_pt, method, categories)
    return node
'''
def best_attr(data, features, points_index, method, categories):
# random approach for testing purpose
random_attr = randint(0, features-1)
count = 0
sumi = 0
for i in points_index:
count += 1
sumi += data[i][random_attr]
if count != 0:
val = sumi/count
else:
val = sumi
return random_attr, val
''' | [
"ramarvab@149-161-153-43.dhcp-bl.indiana.edu"
] | ramarvab@149-161-153-43.dhcp-bl.indiana.edu |
66e3f4caa9591de9d0af524ee3a95b967482a74a | 0a9e171dbce02947a88290ee16e0fb90e796428a | /Linear_Regression/multi_linreg.py | e9f2773099fcceb018b319c0c3abbda27822ce9d | [] | no_license | sanjmohan/MLExercises | e3416990215a0c101f44093ddaf42816ea8d742c | a71698c794a858496d250ea5a4f9dc6ff55cf171 | refs/heads/master | 2021-01-12T05:32:42.817911 | 2020-04-21T22:09:32 | 2020-04-21T22:09:32 | 77,121,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,569 | py | # Multiple Linear Regression on Housing Price given 13 attributes
# First time using theano!
import numpy as np
import theano
from theano import tensor as T
from theano import function
import matplotlib.pyplot as plt
def load_data():
    """Read 'Data/housing_data.txt' and return (x, y).

    Each non-empty line is expected to hold 14 whitespace-separated floats:
    13 features followed by the target value. A constant 1 is prepended to
    every feature row as the theta_0 (bias) placeholder.
    """
    # Features separated by space, examples separated by line breaks
    # Load first 13 values as feature, load last value as target
    # Matrix of all examples (row = example, column = feature)
    x = []
    # matrix of all target values
    y = []
    # read file of data
    with open('Data/housing_data.txt') as f:
        for line in f:
            if line != "":
                ex = [float(i) for i in line.split(' ') if i != '']
                # first "attribute" is 1 as placeholder for theta_0
                x.append([1] + ex[:-1])
                y.append(ex[-1])
    return x, y
def normalize(data):
    """Min-max scale every feature column of `data` to [0, 1].

    The first column is the all-ones bias placeholder and is left untouched.
    Returns a plain list of lists with the same shape as `data`.
    """
    # Transpose => each row is a feature
    xT = np.asarray(data).T
    # Skip first placeholder "feature"
    for i in range(1, len(xT)):
        feature = xT[i]
        min_val = min(feature)
        max_val = max(feature)
        span = max_val - min_val
        if span == 0:
            # Robustness fix: a constant feature used to divide by zero and
            # fill the column with NaN; map it to all zeros instead.
            xT[i] = feature - min_val
        else:
            xT[i] = (feature - min_val) / span
    return (xT.T).tolist()
x_in, y_in = load_data()
x_in = normalize(x_in)
num_test = 100
num_train = len(x_in) - num_test
# leave last num_test examples for testing
train_x = np.asarray(x_in[:-num_test])
train_y = np.asarray(y_in[:-num_test]).reshape(num_train, 1)
test_x = np.asarray(x_in[-num_test:])
test_y = np.asarray(y_in[-num_test:]).reshape(num_test, 1)
print("Data Size: %d" % len(x_in))
print("Number of Features: %d" % (len(x_in[0])-1)) # first "feature" is placeholder
print("Training Size: %d" % num_train)
print("Test Size: %d" % num_test)
# shared var - column vector with length = number of independent attributes
theta = theano.shared(np.zeros((train_x.shape[1], 1)))
# symbolic inputs to cost function
x = T.matrix('x')
y = T.matrix('y')
# Compute predictions (feedforward, hypothesis, etc.)
pred = T.dot(x, theta)
# least mean square cost function
c = 0.5 * T.mean((pred - y) ** 2)
# function([symbolic inputs], output, name=name)
cost = theano.function([x, y], c, name="cost")
# least mean square cost partial derivatives
# grad w/respect to theta_j = 1/m * sum( (x_i - y_i) * x_i_j )
# gc = 1/num_train * T.dot(x.T, (pred - y))
# gradient descent update function
# learning rate
lr = 0.01
print("Learning Rate: %f" % lr)
# update format: (shared var to update, expression representing update)
# featuring symbolic differentiation!
updates = [(theta, theta - lr * T.grad(c, theta))]
grad_desc = theano.function([x, y], theta, updates=updates, name="grad_desc")
# iterate through gradient descent fixed number of times
# list of costs at each iteration
accuracy = []
iters = 3000
for i in range(iters):
grad_desc(train_x, train_y)
accuracy.append(cost(train_x, train_y))
if i % (iters // 20) == 0 or i == iters - 1:
print("Iteration %d" % (i+1))
print("Minimum Training Cost: %f" % min(accuracy))
print("Test Cost: %f" % cost(test_x, test_y))
# show (hopefully) decreasing cost
plt.plot(range(iters), accuracy)
plt.show()
# 300 iters, lr = 0.000006: min cost = 36
# (higher lr explodes)
# 300 iters w/normalization, lr = 0.000006: min cost = 261
# 300 iters w/normalization, lr = 0.01: min cost = 35, test cost = 16
# (higher lr explodes)
# 3000 iters, lr = 0.000006: min cost = 27
# 3000 iters w/normalization: min cost = 178
# 3000 iters w/normalization, lr = 0.01: min cost = 15, test cost = 10 | [
"sam2mohan@gmail.com"
] | sam2mohan@gmail.com |
c61b3a1fac58f75d6fca03e86ed4fb7ac6661149 | 63b6c1ff3827055d05a92aaf9c6cfb5c5617bccc | /compositionDuMahal/admin.py | bc70c0c1f2c6aeef1f0c5e2eee5b48c98b9a5bda | [] | no_license | Slam0810/django | 990deb8f615036ead078304c3298f246a8e5002e | d81923534c1eb0e330de91433fed72a42b922ee6 | refs/heads/master | 2023-03-27T05:32:24.776629 | 2021-03-23T08:59:34 | 2021-03-23T08:59:34 | 345,477,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | from django.contrib import admin
from .models import Produit, Contact, Reservation, Presentation
# Register your models here.
#admin.site.register(Produit)
admin.site.register(Contact)
class ReservationAdmin(admin.ModelAdmin):
    # Reservations are review-only in the admin: the two fields are shown
    # read-only and manual creation is disabled below (presumably they are
    # created through the public site — confirm against the views).
    list_display = ('created_at', 'contacted')
    readonly_fields = ('created_at', 'contacted')
    def has_add_permission(self, request):
        # Hide the "Add reservation" button in the admin UI.
        return False
class PresentationAdmin(admin.ModelAdmin):
    # Columns shown in the Presentation change list.
    list_display = ('nom', 'fonction', 'metier')
class ProduitAdmin(admin.ModelAdmin):
    # Change-list configuration for Produit: visible columns, sidebar filter,
    # date drill-down navigation, default ordering and searchable fields.
    list_display =('nom', 'image', 'description','etat', 'caracteristic', 'date')
    list_filter = ('nom',)
    date_hierarchy = 'date'
    ordering = ('date',)
    search_fields = ('nom', 'etat', 'caracteristic')
admin.site.register(Produit, ProduitAdmin)
admin.site.register(Presentation, PresentationAdmin)
admin.site.register(Reservation, ReservationAdmin)
| [
"birama.tour@gmail.com"
] | birama.tour@gmail.com |
609e132e00c44ec7d44b98d5322c72c6ba7dd196 | db5952a4ecb74177d8acdcc1440af65a540ba07e | /abbrev.py | 6df1f73e9d1fc70481055894247005d7c95acf5a | [] | no_license | dividedmind/moviesorter | a39aaf03f5ac0984d4a57cce2fcf87b320a59bdf | f2d57d184d63ccc35bd0d1b10dde7de803da5aae | refs/heads/master | 2021-01-23T15:42:35.272179 | 2011-08-22T11:55:10 | 2011-08-22T11:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | # -*- coding: utf-8 -*-
from google.appengine.ext import webapp
register = webapp.template.create_template_register()
@register.filter
def abbrev(name):
    """Template filter: return the initials of *name* — the first character
    of each whitespace-separated word, concatenated."""
    initials = (word[0] for word in name.split())
    return ''.join(initials)
| [
"divided.mind@gmail.com"
] | divided.mind@gmail.com |
bf4537cf065e4166235f72754c003c22dff1f73a | 7740035c7af9b8dcf1a82f0c1b46a7c6dd18f8f4 | /blog_project/blogs/migrations/0001_initial.py | 39215dc6daab96b05125b0c07573cf9e2c715387 | [] | no_license | Arijit1303/Django-Projects | 2df930d84ace7637eeaa4d5e3bba161fbce9a4f4 | 2125894e126b5f7089abf7d6d020fb1a82940463 | refs/heads/master | 2021-05-01T17:02:26.912653 | 2018-02-10T07:31:01 | 2018-02-10T07:31:01 | 120,993,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | # Generated by Django 2.0.1 on 2018-01-15 10:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (see file header): creates the Post
    # table with an FK to the configured user model as the author.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
0acfd5a67da2070d576a8fb6eb33f195f4b5c0d5 | 1debf486be97ea914c49f69208ab56b4a4d3c013 | /lab.py | 894ef26c5241bbbd05400b303fb2abde0df3dc23 | [] | no_license | XingyuHe/Monopoly | ee483387a723dc8ce2511d75ba86021db6a5acde | 00e9d7e963af29501daac988b966bc03ab276353 | refs/heads/master | 2021-08-16T16:35:46.013197 | 2017-11-20T04:22:24 | 2017-11-20T04:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | import json
from cs110graphics import *
monopoly_data = json.load(open("monopoly.json"))
print (monopoly_data)
options = monopoly_data['CHANCE']['options']
print(options)
print(len(options))
print(options[str(2)])
# def program(win):
# win.set_height(1600)
# win.set_width(1600)
# rect1 = Rectangle(win)
# win.add(rect1)
# def main():
# StartGraphicsSystem(program) | [
"32248504+PeterWiIIiam@users.noreply.github.com"
] | 32248504+PeterWiIIiam@users.noreply.github.com |
a5b8f6d17d44bb21c75ef0b3d113fa8cb8f0dfdc | 90be82216cd48b95e8ebfd0054a048ea91707872 | /organisations/migrations/0002_auto_20200828_1740.py | c9a44789b14b923443120b8e9961778cd8f797de | [
"MIT"
] | permissive | rubberducklive/tcm_api | 9c774e50e1e04f4c4c5735871ef38d35b013a5e0 | 53d2b533e3f9251cce49bd4c1b8e9e65a03eaf04 | refs/heads/main | 2022-12-06T17:30:29.807552 | 2020-08-28T17:49:19 | 2020-08-28T17:49:19 | 290,266,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | # Generated by Django 3.0.9 on 2020-08-28 17:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see file header): makes
    # Organisation.name unique with a max length of 255.
    dependencies = [
        ('organisations', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='organisation',
            name='name',
            field=models.CharField(max_length=255, unique=True, verbose_name='Name of the organisation'),
        ),
    ]
| [
"ulhas.sm@gmail.com"
] | ulhas.sm@gmail.com |
0628946d4e9a280e8355cd0413d75bd4a43845dc | 84e5297e214dd94105df7bbe627a506773d70224 | /Assignment2/dnn_tf.py | 478f858ded57e45f0034d15cb734f6130922bf28 | [] | no_license | toannguyen1904/VietAI-ML-Foundation-5 | b02b1463d0b820088fa7400112d41d4291357172 | 5adcd49c88e4c886b15973254d56c07c15a8660d | refs/heads/master | 2022-05-16T10:27:27.570181 | 2020-03-16T05:37:58 | 2020-03-16T05:37:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,419 | py | """dnn_tf_sol.py
Solution of deep neural network implementation using tensorflow
Author: Kien Huynh
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from util import *
from dnn_np import test
import pdb
def bat_classification():
    """Train a TF1 DNNClassifier on the bat dataset and report test accuracy.

    Expects util.get_bat_data()/normalize() and dnn_np.test() from this
    project; data file bat.dat must be in data/.
    """
    # Load data from file
    # Make sure that bat.dat is in data/
    train_x, train_y, test_x, test_y = get_bat_data()
    train_x, _, test_x = normalize(train_x, train_x, test_x)
    test_y = test_y.flatten().astype(np.int32)
    train_y = train_y.flatten().astype(np.int32)
    num_class = (np.unique(train_y)).shape[0]
    # DNN parameters
    hidden_layers = [100, 100, 100]
    learning_rate = 0.01
    batch_size = 200
    steps = 2000
    # Specify that all features have real-value data
    feature_columns = [tf.feature_column.numeric_column("x", shape=[train_x.shape[1]])]
    # Available activition functions
    # https://www.tensorflow.org/api_guides/python/nn#Activation_Functions
    # tf.nn.relu
    # tf.nn.elu
    # tf.nn.sigmoid
    # tf.nn.tanh
    activation = tf.nn.relu
    # [TODO 1.7] Create a neural network and train it using estimator
    # Some available gradient descent optimization algorithms
    # https://www.tensorflow.org/api_docs/python/tf/train#classes
    # tf.train.GradientDescentOptimizer
    # tf.train.AdadeltaOptimizer
    # tf.train.AdagradOptimizer
    # tf.train.AdagradDAOptimizer
    # tf.train.MomentumOptimizer
    # tf.train.AdamOptimizer
    # tf.train.FtrlOptimizer
    # tf.train.ProximalGradientDescentOptimizer
    # tf.train.ProximalAdagradOptimizer
    # tf.train.RMSPropOptimizer
    # Create optimizer
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.005)
    # build a deep neural network
    # https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier
    classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                            hidden_units=hidden_layers,
                            n_classes=num_class,
                            activation_fn=activation,
                            optimizer=optimizer)
    # Define the training inputs
    # https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x = {"x": train_x},
                                y = train_y,
                                batch_size=batch_size,
                                shuffle=True,
                                num_epochs=None)
    # Train model.
    classifier.train(
        input_fn=train_input_fn,
        steps=steps)
    # Define the test inputs
    # NOTE(review): test_input_fn is built but never used (evaluate() is
    # never called); accuracy comes from the predict() path below.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        y=test_y,
        num_epochs=1,
        shuffle=False)
    # Evaluate accuracy.
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        num_epochs=1,
        shuffle=False)
    y_hat = classifier.predict(input_fn=predict_input_fn)
    y_hat = list(y_hat)
    y_hat = np.asarray([int(x['classes'][0]) for x in y_hat])
    test(y_hat, test_y)
def mnist_classification():
    """Train a TF1 DNNClassifier on Fashion-MNIST and report test accuracy.

    Expects util.get_mnist_data()/normalize() and dnn_np.test() from this
    project; fashion-mnist/*.gz must be in data/.
    """
    # Load data from file
    # Make sure that fashion-mnist/*.gz is in data/
    train_x, train_y, val_x, val_y, test_x, test_y = get_mnist_data(1)
    # NOTE(review): train_x is passed twice here; normalize(train_x, val_x,
    # test_x) was probably intended so val_x gets scaled too — confirm
    # against util.normalize's signature before changing.
    train_x, val_x, test_x = normalize(train_x, train_x, test_x)
    train_y = train_y.flatten().astype(np.int32)
    val_y = val_y.flatten().astype(np.int32)
    test_y = test_y.flatten().astype(np.int32)
    num_class = (np.unique(train_y)).shape[0]
    # Bug fix: removed a leftover pdb.set_trace() debug breakpoint that
    # halted every run here.
    # DNN parameters
    hidden_layers = [100, 100, 100]
    learning_rate = 0.01
    batch_size = 200
    steps = 500
    # Specify that all features have real-value data
    feature_columns = [tf.feature_column.numeric_column("x", shape=[train_x.shape[1]])]
    # Choose activation function
    activation = tf.nn.sigmoid
    # Some available gradient descent optimization algorithms
    # TODO: [YC1.7] Create optimizer
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.005)
    # build a deep neural network
    classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                            hidden_units=hidden_layers,
                            n_classes=num_class,
                            activation_fn=activation,
                            optimizer=optimizer)
    # Define the training inputs
    # https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x = {"x": train_x},
                                y = train_y,
                                batch_size=batch_size,
                                shuffle=True,
                                num_epochs=None)
    # Train model.
    classifier.train(
        input_fn=train_input_fn,
        steps=steps)
    # Define the test inputs
    # NOTE(review): test_input_fn is built but never used (evaluate() is
    # never called); accuracy comes from the predict() path below.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        y=test_y,
        num_epochs=1,
        shuffle=False)
    # Evaluate accuracy.
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x},
        num_epochs=1,
        shuffle=False)
    y_hat = classifier.predict(input_fn=predict_input_fn)
    y_hat = list(y_hat)
    y_hat = np.asarray([int(x['classes'][0]) for x in y_hat])
    test(y_hat, test_y)
if __name__ == '__main__':
np.random.seed(2017)
plt.ion()
bat_classification()
mnist_classification()
| [
"47108512+ChrisZangNam@users.noreply.github.com"
] | 47108512+ChrisZangNam@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.