text stringlengths 8 6.05M |
|---|
from django.conf.urls import patterns, include, url
from apps.metrics import views as metricsView

# URL routes for the metrics app (legacy Django <1.10 `patterns()` syntax).
urlpatterns = patterns('',
    url(r'^$', metricsView.dashboard, name="dashboard"),
    # BUG FIX: the pattern lacked the leading '^' anchor, so any URL path
    # *ending* in 'chartDataJson/' would have matched this route.
    # NOTE(review): the view name `chardDataJSON` looks like a typo, but it
    # must match the actual function in apps.metrics.views -- confirm there.
    url(r'^chartDataJson/$', metricsView.chardDataJSON, name="chart_data_json"),
)
|
# Generated by Django 2.0.6 on 2018-06-19 20:17
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the home app's `Comments` model to the singular `Comment`."""

    dependencies = [
        # Depends on the swappable user model and the previous home migration.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0005_comments'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Comments',
            new_name='Comment',
        ),
    ]
|
from django import forms
from .models import *
class LoginForm(forms.ModelForm):
    """Login form backed by the Login model; password input is masked."""
    password = forms.CharField(widget=forms.PasswordInput)

    class Meta:
        model = Login
        fields = ('user_name', 'password',)
class detailForm(forms.ModelForm):
    """Address-details form backed by the Detail model.

    NOTE(review): name breaks PascalCase convention; kept because callers
    elsewhere may reference `detailForm`.
    """

    class Meta:
        model = Detail
        fields = ('state','city', 'address',)
import sys
class Node:
    """A tree node holding a (index, chars) tuple, its parent, and children."""

    def __init__(self, value, parent, children=None):
        self.value = value
        self.parent = parent
        # BUG FIX: the original used a mutable default (children=[]), which
        # shared ONE list across every Node created without the argument.
        self.children = children if children is not None else []

    def add(self, entry, l=0):
        """Try to attach `entry` somewhere below this node.

        Returns True if a new child node was created anywhere in the
        subtree, False otherwise.  `l` is the recursion depth (debug only).
        """
        # BUG FIX: the original printed the bare name `children` (NameError);
        # converted the py2 print statements to py3 while fixing it.
        print(l, self.children)
        r = False
        if entry[0] != self.value[0]:
            f = False
            for child in self.children:
                if entry[0] == child.value[0]:
                    # An entry with this index already exists at this level.
                    f = True
                elif entry[1][0] == child.value[1][1]:
                    # entry's first char chains onto child's last char.
                    print("adding ", entry, " to ", child.value, "child of", self.value)
                    r = r or child.add(entry, l + 1)
            if not f:
                self.children.append(Node(entry, self))
                r = True
        return r

    def prnt(self, level=0):
        """Print the subtree, indenting children by their depth."""
        print(self.value)
        for child in self.children:
            print(" " * level, end=" ")
            child.prnt(level + 1)
def process(line):
    """Parse one comma-separated line into (index, first+last char) tuples,
    build a tree rooted at the first word, and print it.

    Exits the process on any error while building the tree.
    """
    line = line.strip()
    # Each word becomes (position, first_char + last_char).
    words = [(j, i[0] + i[-1]) for j, i in enumerate(line.split(','))]
    nodes = [Node(i, None) for i in words]
    for word in words:
        try:
            nodes[0].add(word)
        except Exception as e:  # BUG FIX: py2 `except Exception,e` syntax
            print(e)
            sys.exit()
    nodes[0].prnt()
# Script entry: process only the FIRST line of the input file
# (default test.txt, or the path given as the first CLI argument).
if len(sys.argv) < 2:
    fname = 'test.txt'
else:
    fname = sys.argv[1]
with open(fname,'r') as f:
    for line in f:
        process(line)
        break  # only the first line is processed
|
"""A program to generate Spotify playlist that adds songs from your liked videos on YouTube."""
# Log into YouTube
# Fetch liked YouTube videos
# Create new Spotify playlist
# Search for song on Spotify
# Add song to new Spotify playlist
import json
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import requests
import youtube_dl
from exceptions import ResponseException
from secrets import spotify_token, spotify_user_id
class Playlist:
    """Create a Spotify playlist from the user's liked YouTube videos."""

    def __init__(self):
        # BUG FIX: this method was named `__self__`, so it never ran and
        # instances were created without any of these attributes.
        self.user_id = spotify_user_id
        self.youtube_client = self.get_youtube_client()
        self.all_song_info = {}

    def get_youtube_client(self):
        """Log in to YouTube, return YouTube client."""
        os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
        api_service_name = "youtube"
        api_version = "v3"
        client_secrets_file = "client_secret.json"
        # Get YouTube credentials and create API client
        # BUG FIX: the scope URL contained a space instead of a slash.
        scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            client_secrets_file, scopes)
        credentials = flow.run_console()
        # YouTube Data API
        youtube_client = googleapiclient.discovery.build(
            api_service_name, api_version, credentials=credentials)
        return youtube_client

    def get_liked_vids(self):
        """Get liked videos from YouTube; populate self.all_song_info."""
        request = self.youtube_client.videos().list(
            part="snippet,contentDetails,statistics",
            myRating="like")
        response = request.execute()
        # Loop through videos to get important information about songs
        for item in response["items"]:
            video_title = item["snippet"]["title"]
            youtube_url = "https://www.youtube.com/watch?v={}".format(item["id"])
            # Use youtube_dl to collect song name & artist name
            video = youtube_dl.YoutubeDL({}).extract_info(youtube_url,
                                                          download=False)
            song_name = video["track"]
            artist = video["artist"]
            if song_name is not None and artist is not None:
                # Save song info and skip any missing song/artist
                self.all_song_info[video_title] = {
                    "youtube_url": youtube_url,
                    "song_name": song_name,
                    "artist": artist,
                    # Add the URI so the song can be appended to the playlist
                    "spotify_uri": self.get_spotify_uri(song_name, artist)
                }

    def create_spotify_playlist(self):
        """Create a new Spotify playlist; return its ID.

        Raises ResponseException on a non-success HTTP status.
        """
        # BUG FIX: a stray `pass` at the top made this whole body unreachable.
        request_body = json.dumps({
            "name": "Liked YouTube Songs",
            "description": "All Liked Songs from YouTube",
            "public": True})
        query = f"https://api.spotify.com/v1/users/{self.user_id}/playlists"
        response = requests.post(query,
                                 data=request_body,
                                 headers={
                                     "Content-Type": "application/json",
                                     "Authorization": f"Bearer {spotify_token}"})
        # Spotify returns 201 Created on success (200 accepted defensively).
        if response.status_code not in (200, 201):
            raise ResponseException(response.status_code)
        # BUG FIX: the response was stored as `response_token` but read back
        # as `response_json`, which raised NameError.
        response_json = response.json()
        # Playlist ID
        return response_json["id"]

    def get_spotify_uri(self, song_name, artist):
        """Search for the song on Spotify; return the first match's URI."""
        query = "https://api.spotify.com/v1/search?query=track%3A{}+artist%3A{}&type=track&offset=0&limit=20".format(song_name,
                                                                                                                     artist)
        response = requests.get(query,
                                headers={
                                    "Content-Type": "application/json",
                                    "Authorization": "Bearer {}".format(spotify_token)})
        response_json = response.json()
        songs = response_json["tracks"]["items"]
        # Only use the first song
        uri = songs[0]["uri"]
        return uri

    def add_song(self):
        """Add all liked-video songs to a newly created Spotify playlist."""
        # Populate dictionary with liked songs
        self.get_liked_vids()
        # Collect all URIs
        uris = [info["spotify_uri"]
                for song, info in self.all_song_info.items()]
        # Create a new playlist
        playlist_id = self.create_spotify_playlist()
        # Add all songs to the new playlist
        request_data = json.dumps(uris)
        # BUG FIX: removed the duplicated `query = query =` assignment.
        query = "https://api.spotify.com/v1/playlists/{}/tracks".format(playlist_id)
        # BUG FIX: `requlests` typo raised NameError.
        response = requests.post(query,
                                 data=request_data,
                                 headers={"Content-Type": "application/json",
                                          "Authorization": "Bearer {}".format(spotify_token)})
        # Check for valid response status (Spotify returns 201 on success).
        if response.status_code not in (200, 201):
            raise ResponseException(response.status_code)
        response_json = response.json()
        return response_json
if __name__ == '__main__':
    # BUG FIX: the original referenced `CreatePlaylist()` and
    # `add_song_to_playlist()`, neither of which exists in this module;
    # the class is `Playlist` and its entry method is `add_song`.
    cp = Playlist()
    cp.add_song()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 10:28:58 2020
@author: shaun
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sympy import *
from integration import *
from math import log10, floor
#set global variables that are changed within the function bessel
# this avoids having to pass more parameters into bessel
besselx=0
M=0
#define integrand within bessel function
def function(x):
    """Integrand cos(M*x - besselx*sin(x)) for the Bessel quadrature.

    Reads the module-level globals M (order) and besselx (argument),
    which bessel() assigns before integrating.
    """
    global M
    global besselx
    global test
    integrand_value = np.cos(M * x - besselx * np.sin(x))
    # Debug flag left by the original author; preserved for compatibility.
    test = 1000
    return integrand_value
#define bessel function
def bessel(m,x):
    """Bessel function J_m(x) via numerical integration over [0, pi].

    Sets the module globals M/besselx read by function(), then integrates
    with swhole() from the local `integration` module using 1000 steps.
    NOTE(review): swhole(...)[2] is presumably the integral estimate --
    confirm against integration.swhole's return convention.
    """
    global besselx
    global M
    besselx=x
    M=m
    f=(1/np.pi)*swhole(1000,0,np.pi,function)[2]
    return f
#rounds to the appropriate sig fig when calculating density
def round_sig(x, sig):
    """Round x to `sig` significant figures.

    BUG FIX: guards x == 0, for which log10 is undefined (the original
    raised ValueError); zero is returned unchanged.  Behavior for all
    nonzero inputs is unchanged.
    """
    if x == 0:
        return 0
    return round(x, sig-int(floor(log10(abs(x))))-1)
# Tabulate J_m(x) for m = 0..19 at integer x = 0..19 and plot one curve per m.
listx=[]
listy=[]
#creates a list of bessel functions with different m values
for m in range(0,20):
    listy.append([])
    listx.append(m)
    for x in range(0,20):
        listy[-1].append(bessel(m,x))
#plot these bessel functions with different symbols and colors
figure1=plt.figure(figsize=(20,10))
for y in range(0, len(listy)):
    color="C"+str(y)
    # matplotlib accepts integer marker styles 0..11, hence the modulo
    m=y%12
    plt.plot(listx,listy[y],color,marker=m, label="M="+str(y))
#plot
plt.legend()
plt.xlabel("X")
plt.ylabel("Value of bessel function")
plt.title("Bessel function at different values of M")
# Create the array for the heat map of the Airy-style diffraction intensity.
A=np.zeros([100,100],float)
row=A.shape[0]
col=A.shape[1]
xspace=np.linspace(-1,1,row)
yspace=np.linspace(-1,1,col)
intensitymax=0
#walk through array and sets density at each point in array
for x in range(0,row):
    print("We are "+str(100*float(x)/row)+" percent finished")
    for y in range(0,col):
        xvalue=xspace[x]
        yvalue=yspace[y]
        #calculate r
        r=(xvalue**2+yvalue**2)**0.5
        lamb=0.5
        k=(2*np.pi)/lamb
        if(r!=0):
            j=bessel(1,k*r)
            intensity=(j/(k*r))**2
            if (intensity>intensitymax):
                intensitymax=intensity
        else:
            # limit of (J1(kr)/(kr))^2 as r -> 0 is 1/4
            intensity=1/4.0
        A[x][y]=round_sig(intensity,1)
#plot the heat map
figure2=plt.figure(figsize=(5,5))
ax=figure2.add_subplot(1,1,1)
im=ax.imshow(A,vmax=intensitymax)
figure2.suptitle(r"Light intensity Density plot",fontsize=30)
ax.set_xlabel(r"X",fontsize=15)
ax.set_ylabel(r"Y",fontsize=15)
# create legend for heat map
v = np.unique(A.ravel())
#include 0 in the heat map legend
values=[0]
for value in v:
    #only shows values greater than 0.01 in the heat map legend
    if value>0.01:
        values.append(value)
colors = [im.cmap(im.norm(value)) for value in values]
patches = [ matplotlib.patches.Patch(color=colors[i], label="Density {l}".format(l=values[i]) ) for i in range(len(values)) ]
ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
plt.show()
|
import cv2
import numpy as np
from time import time as timer
class VisualMicrohone(object):
    """Track a colored object across video frames and record centroid motion.

    (Class name kept as-is -- including the 'Microhone' typo -- because the
    __main__ block instantiates it by this name.)
    """

    def __init__(self, upper, lower, video):
        self.capture = self.setup_capture(video)
        self.start = 0
        self.mask = None
        self.area_size_min = None
        self.area_size_max = None
        self.lower_color_range = np.array(lower, np.uint8)
        self.upper_color_range = np.array(upper, np.uint8)
        self.amplitudes = []  # [cx, cy, t] samples per tracked contour

    @staticmethod
    def setup_capture(video):
        """Give path to the video or 0 for webcam"""
        return cv2.VideoCapture(video)

    def escape_on_q(self):
        """ Press 'q' on the video to quit the program """
        escape = False
        if cv2.waitKey(10) & 0xFF == ord('q'):
            self.capture.release()
            cv2.destroyAllWindows()
            escape = True
        return escape

    def set_color_mask(self, img):
        """ Create a mask based on the hsv profile of the image from the video and the selected color """
        img_hsv_profile = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        self.mask = cv2.inRange(img_hsv_profile, self.lower_color_range, self.upper_color_range)
        self.__erode_mask()

    def __erode_mask(self):
        """ Erode the pixel near the border in the mask to reduce noise """
        kernel = np.ones((5, 5), "uint8")
        self.mask = cv2.erode(self.mask, kernel, iterations=1)

    def get_contours(self):
        """ Get the contours of the objects found with the mask """
        # NOTE(review): 3-value unpacking matches OpenCV 3.x; OpenCV 4.x
        # returns (contours, hierarchy) -- confirm the installed version.
        (_, contours, hierarchy) = cv2.findContours(self.mask, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        return contours

    def select_area(self, contour):
        """Return the contour area, or False if it falls in the rejected band.

        The first contour seen initializes both bounds.
        """
        area = cv2.contourArea(contour)
        if self.area_size_min is None:
            self.area_size_min = area
        if self.area_size_max is None:
            self.area_size_max = area
        # BUG FIX: the upper bound compared against area_size_min (typo).
        # NOTE(review): rejecting areas *inside* [min, max] looks inverted;
        # confirm the intended filtering direction before relying on it.
        if self.area_size_min <= area <= self.area_size_max:
            area = False
        return area

    def get_movement_amplitude(self, contour):
        """ Getting the centroid of the contour and recording its coordinates """
        centroid = cv2.moments(contour)
        cx = int(centroid['m10'] / centroid['m00'])
        cy = int(centroid['m01'] / centroid['m00'])
        print(cx)
        self.amplitudes.append([cx, cy, self.get_time()])

    def get_time(self):
        """Seconds elapsed since the current frame's start timestamp."""
        return timer() - self.start

    def process_video(self):
        """Main loop: read frames, mask by color, record contour centroids."""
        while self.capture.isOpened():
            print("yeah")
            self.start = timer()
            ok, img = self.capture.read()
            # BUG FIX: guard against end-of-stream; imshow(None) would crash.
            if not ok:
                break
            cv2.imshow("Color Tracking", img)
            # BUG FIX: the return value of escape_on_q() was ignored, so
            # pressing 'q' released the capture but the loop kept running.
            if self.escape_on_q():
                break
            self.set_color_mask(img)
            contours = self.get_contours()
            for _, contour in enumerate(contours):
                if self.select_area(contour):
                    self.get_movement_amplitude(contour)
if __name__ == "__main__":
    # BUG FIX: the constructor signature is (upper, lower, video), but the
    # original passed ([0,0,0], [255,255,255]) -- i.e. upper below lower,
    # so cv2.inRange could never match anything.  Swapped to full range.
    vm = VisualMicrohone([255, 255, 255], [0, 0, 0], "../resources/chips.mp4")
    vm.process_video()
|
#!/usr/bin/python
from os import environ, listdir
# CGI handler (Python 2): emit a plain-text listing of the directory named
# by the DIRECTORY environment variable (raises KeyError if unset).
print "Content-Type: text/plain"
print
for i in listdir(environ['DIRECTORY']):
    print i
|
# %%
"""Kinematic Object Placement"""
import math
import os
import random
import sys
import git
import magnum as mn
import numpy as np
import habitat_sim
from habitat_sim.utils import viz_utils as vut
# %%
# Paths into the habitat-sim checkout.  `%set_env` is an IPython/Jupyter
# magic, so this file only runs in a notebook-style environment (the `# %%`
# markers delimit notebook cells).
file_path = "/home/habitat/habitat/habitat-sim/"
os.chdir(file_path)
%set_env DISPLAY=:0
data_path = os.path.join(file_path, "data")
output_path = os.path.join("/home/habitat/habitat/examples/video/")
# %%
def remove_all_objects(sim):
    """Remove every object currently instanced in the simulator."""
    for object_id in sim.get_existing_object_ids():
        sim.remove_object(object_id)
def place_agent(sim):
    """Place the default agent at a fixed pose; return its transform matrix."""
    # place our agent in the scene
    agent_state = habitat_sim.AgentState()
    agent_state.position = [-0.15, -0.7, 1.0]
    agent_state.rotation = np.quaternion(-0.83147, 0, 0.55557, 0)
    agent = sim.initialize_agent(0, agent_state)
    return agent.scene_node.transformation_matrix()
def make_configuration():
    """Build the habitat-sim Configuration: backend, sensors, and agent."""
    # simulator configuration
    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene_id = os.path.join(
        data_path, "scene_datasets/habitat-test-scenes/apartment_1.glb"
    )
    assert os.path.exists(backend_cfg.scene_id)
    backend_cfg.enable_physics = True
    # sensor configurations
    # Note: all sensors must have the same resolution
    # setup 2 rgb sensors for 1st and 3rd person views
    camera_resolution = [544, 720]
    sensors = {
        "rgba_camera_1stperson": {
            "sensor_type": habitat_sim.SensorType.COLOR,
            "resolution": camera_resolution,
            "position": [0.0, 0.6, 0.0],
            "orientation": [0.0, 0.0, 0.0],
        },
        "depth_camera_1stperson": {
            "sensor_type": habitat_sim.SensorType.DEPTH,
            "resolution": camera_resolution,
            "position": [0.0, 0.6, 0.0],
            "orientation": [0.0, 0.0, 0.0],
        },
        "rgba_camera_3rdperson": {
            "sensor_type": habitat_sim.SensorType.COLOR,
            "resolution": camera_resolution,
            "position": [0.0, 1.0, 0.3],
            "orientation": [-45, 0.0, 0.0],
        },
    }
    sensor_specs = []
    for sensor_uuid, sensor_params in sensors.items():
        sensor_spec = habitat_sim.SensorSpec()
        sensor_spec.uuid = sensor_uuid
        sensor_spec.sensor_type = sensor_params["sensor_type"]
        sensor_spec.resolution = sensor_params["resolution"]
        sensor_spec.position = sensor_params["position"]
        sensor_spec.orientation = sensor_params["orientation"]
        sensor_specs.append(sensor_spec)
    # agent configuration
    agent_cfg = habitat_sim.agent.AgentConfiguration()
    agent_cfg.sensor_specifications = sensor_specs
    return habitat_sim.Configuration(backend_cfg, [agent_cfg])
def simulate(sim, dt=1.0, get_frames=True):
    """Step physics at 60 Hz for ~dt world-seconds.

    Returns the list of sensor observations captured after each step
    (empty when get_frames is False).
    """
    print("Simulating " + str(dt) + " world seconds.")
    observations = []
    end_time = sim.get_world_time() + dt
    while sim.get_world_time() < end_time:
        sim.step_physics(1.0 / 60.0)
        if get_frames:
            observations.append(sim.get_sensor_observations())
    return observations
# %%
# Create the simulator (closing any previous instance so re-running the
# notebook cell is safe), place the agent, and drop icospheres.
cfg = make_configuration()
try: # Got to make initialization idiot proof
    sim.close()
except NameError:
    pass
sim = habitat_sim.Simulator(cfg)
agent_transform = place_agent(sim)
# get the primitive assets attributes manager
prim_templates_mgr = sim.get_asset_template_manager()
# get the physics object attributes manager
obj_templates_mgr = sim.get_object_template_manager()
# show handles
handles = prim_templates_mgr.get_template_handles()
for handle in handles:
    print(handle)
make_video = True
show_video = False
get_frames = True
# %%
observations = []
iconsphere_template = prim_templates_mgr.get_default_icosphere_template(is_wireframe = False)
iconsphere_template_handle = iconsphere_template.handle
id_1 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.64, 0]), id_1)
# set one object to kinematic
sim.set_object_motion_type(habitat_sim.physics.MotionType.KINEMATIC, id_1)
#habitat_sim.physics.MotionType.KINEMATIC/STATIC/DYNAMIC
# drop some dynamic objects
id_2 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.64, 0.28]), id_2)
sim.set_object_motion_type(habitat_sim.physics.MotionType.KINEMATIC, id_2)
id_3 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.64, -0.28]), id_3)
sim.set_object_motion_type(habitat_sim.physics.MotionType.DYNAMIC, id_3)
# id_4 and id_5 keep the simulator's default motion type
id_4 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.3, 0]), id_4)
id_5 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4,-0.10,-0.28]), id_5)
# simulate
observations = simulate(sim, dt=2, get_frames=True)
if make_video:
    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        output_path + "2_kinematic_interactions_3STATIC",
        open_vid=show_video
    )
# %%
|
import os
import pickle
import csv
import requests
import json
from collections import defaultdict
# import facebook
import google.oauth2.credentials
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# NOTE(security): credentials are hard-coded below; move the API key and the
# client-secret path into environment variables or a config file.
# Raw string prevents backslash-escape surprises in the Windows path (the
# original plain string relied on \h, \m, \c not being escape sequences).
CLIENT_SECRETS_FILE = r"D:\hack\main\client_secret.json"
SCOPES = ['https://www.googleapis.com/auth/youtube.force-ssl']
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'
api_key = 'AIzaSyAVJcQ0549l7BnK62jvf3EnITtgeMJXuww'
def get_authenticated_service():
    """Build an authenticated YouTube API client, caching credentials.

    Credentials are pickled to disk; expired ones are refreshed, missing or
    invalid ones trigger a fresh OAuth console flow.
    """
    credentials = None
    # BUG FIX: the original plain string 'D:\hack\main\token.pickle'
    # contained a literal TAB character (\t); raw string keeps it verbatim.
    token_path = r'D:\hack\main\token.pickle'
    if os.path.exists(token_path):
        with open(token_path, 'rb') as token:
            credentials = pickle.load(token)
    # Check if the credentials are invalid or do not exist
    if not credentials or not credentials.valid:
        # Check if the credentials have expired
        if credentials and credentials.expired and credentials.refresh_token:
            credentials.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                CLIENT_SECRETS_FILE, SCOPES)
            credentials = flow.run_console()
        # Save the credentials for the next run
        with open(token_path, 'wb') as token:
            pickle.dump(credentials, token)
    return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
# Module-level accumulator of comment-author display names, appended to by
# get_video_comments() and indexed by list_my_uploaded_videos().
username = []
def get_video_comments(service, **kwargs):
    """Collect [comment_id, text] pairs for all top-level comments.

    Also appends each author's display name to the module-level `username`
    list.  The first page of results is printed for debugging, then the
    same page is processed again by the paging loop below.
    """
    comments = []
    results = service.commentThreads().list(**kwargs).execute()
    # Debug dump of the first result page
    for item in results['items']:
        print(item)
        print()
        print()
    while results:
        for item in results['items']:
            username.append(item['snippet']['topLevelComment']['snippet']['authorDisplayName'])
            comment_id = item['snippet']['topLevelComment']['id']
            comment = item['snippet']['topLevelComment']['snippet']['textDisplay']
            l = [comment_id, comment]
            comments.append(l)
        # Follow pagination until exhausted
        if 'nextPageToken' in results:
            kwargs['pageToken'] = results['nextPageToken']
            results = service.commentThreads().list(**kwargs).execute()
        else:
            break
    return comments
# def write_to_csv(comments):
# with open('comments1.csv', 'w') as comments_file:
# comments_writer = csv.writer(comments_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# comments_writer.writerow(['Video ID', 'Title', 'Username', 'Comment'])
# for row in comments:
# comments_writer.writerow(list(row))
def get_my_uploads_list(service):
    """Return the uploads playlist ID for the authenticated user's channel,
    or None when no channel is found."""
    # Retrieve the contentDetails part of the channel resource for the
    # authenticated user's channel.
    channels_response = service.channels().list(
        mine=True,
        part='contentDetails'
    ).execute()
    for channel in channels_response['items']:
        # From the API response, extract the playlist ID that identifies the list
        # of videos uploaded to the authenticated user's channel.
        return channel['contentDetails']['relatedPlaylists']['uploads']
    return None
# Module-level map of author display name -> list of their toxic comments,
# populated and returned by list_my_uploaded_videos().
data=defaultdict(list)
def list_my_uploaded_videos(service, uploads_playlist_id):
    """Score every comment on the user's uploads for toxicity.

    Each comment is POSTed to the Perspective (commentanalyzer) API; comments
    scoring TOXICITY > 0.8 are written to a log file and grouped per author
    in the module-level `data` defaultdict, which is returned.

    NOTE(review): the final scoring loop indexes `comments` (only the LAST
    video's comments) with positions from `troll_responses` (accumulated
    across ALL videos) -- verify alignment when there is more than one video.
    """
    # Retrieve the list of videos uploaded to the authenticated user's channel.
    url = ('https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze' + '?key=' + api_key)
    playlistitems_list_request = service.playlistItems().list(
        playlistId=uploads_playlist_id,
        part='snippet',
        maxResults=5
    )
    # print(playlistitems_list_request)
    # print 'Videos in list %s' % uploads_playlist_id
    final_result = []
    troll_responses = []
    while playlistitems_list_request:
        playlistitems_list_response = playlistitems_list_request.execute()
        # print(playlistitems_list_response)
        # Print information about each video.
        for playlist_item in playlistitems_list_response['items']:
            title = playlist_item['snippet']['title']
            # print(title)
            video_id = playlist_item['snippet']['resourceId']['videoId']
            # print(video_id)
            # print '%s (%s)' % (title, video_id)
            comments = get_video_comments(service, part='snippet', videoId=video_id, textFormat='plainText')
            # print(comments)
            for i in range(len(comments)):
                # Build one Perspective API request per comment
                data_dict = {}
                data_dict['comment'] = {}
                data_dict['comment']['text'] = comments[i][1]
                data_dict['languages'] = ['en']
                data_dict['requestedAttributes'] = {}
                data_dict['requestedAttributes']['TOXICITY'] = {}
                # print(data_dict)
                response = requests.post(url=url, data=json.dumps(data_dict))
                # print(count)
                # count += 1
                # time.sleep(1)
                response_dict = json.loads(response.content)
                troll_responses.append(response_dict)
                # print(data_dict['comment']['text'])
                # print()
                # print(json.dumps(response_dict, indent=2))
                # print("------------------------------------------------------")
            final_result.extend([(video_id, title, comment) for comment in comments])
        playlistitems_list_request = service.playlistItems().list_next(playlistitems_list_request, playlistitems_list_response)
    # print(troll_responses)
    file = open('D:\hack\main\delete_msg.txt', 'w')
    print(len(troll_responses))
    x = len(comments)
    for i in range(len(troll_responses)):
        if x==0:
            break
        print(i)
        # print(comments[i][1])
        val = troll_responses[i]['attributeScores']['TOXICITY']['summaryScore']['value']
        print(val)
        # print(message[i])
        # print(val)
        # A summary score above 0.8 is treated as a troll comment
        if val*100 > 80:
            # file.write(comments[i][1]+'\n')
            file.write(username[i]+' | '+comments[i][1]+'\n')
            data[username[i]].append(comments[i][1])
            # delelte_comment(service, comments[i][0])
            x -= 1
    file.close()
    return(data)
    # write_to_csv(final_result)
    # playlistitems_list_request = youtube.playlistItems().list_next(
    #     playlistitems_list_request, playlistitems_list_response)
def delelte_comment(service, comment_id):
    """Set a comment's moderation status (misspelled name kept -- callers
    elsewhere reference `delelte_comment`).

    NOTE(review): despite the name, this sets moderationStatus="published",
    which APPROVES the comment; "rejected" would hide it.  Confirm the
    intended behavior before relying on this.
    """
    request = service.comments().setModerationStatus(
        id=comment_id,
        moderationStatus="published",
        banAuthor=False
    )
    request.execute()
def getyoutube():
    """Entry point: authenticate and return the per-author toxic-comment map."""
    # Disables OAuthlib's HTTPS verification for local runs; do NOT leave
    # this enabled in production.
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    service = get_authenticated_service()
    uploads_playlist_id = get_my_uploads_list(service)
    # print(uploads_playlist_id)
    data=list_my_uploaded_videos(service, uploads_playlist_id)
    return(data)
# if __name__ == '__main__':
# When running locally, disable OAuthlib's HTTPs verification. When
# running in production *do not* leave this option enabled.
# os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
# service = get_authenticated_service()
# uploads_playlist_id = get_my_uploads_list(service)
# # print(uploads_playlist_id)
# list_my_uploaded_videos(service, uploads_playlist_id)
# delelte_comment(service)
# service.comments().delete(id='ZciQmtWTN6k').execute()
# search_videos_by_keyword(get_my_uploads_list(service))
# keyword = input('Enter a keyword: ')
# search_videos_by_keyword(service, q=keyword, part='id,snippet', eventType='completed', type='video') |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
import sys
import os
import argparse
import pandas as pd
import logging
def main():
    """Cluster GWAS associations by genomic distance and write the result.

    Pipeline: load associations, filter by p-value, derive chrom/pos from the
    variant ID, decide per study whether distance clustering is warranted,
    apply it to those studies only, and write the merged table.
    """
    # Parse args
    args = parse_args()
    # Start logging
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    handler = logging.FileHandler(args.log)
    logger.addHandler(handler)
    #
    # Load data ----------------------------------------------------------------
    #
    # Load assoc data
    loci = pd.read_csv(args.inf, sep='\t', header=0)
    logger.info('Total inital associations: {0}'.format(loci.shape[0]))
    # Filter based on p-value, reconstructed from mantissa and exponent
    pval = loci.pval_mantissa * (10 ** loci.pval_exponent)
    loci = loci.loc[pval <= args.min_p, :]
    logger.info('N associations after P-value filter: {0}'.format(loci.shape[0]))
    # Get chrom and pos from variant ID (format: chrom_pos_ref_alt)
    loci[['chrom', 'pos', 'alleles']] = (
        loci.variant_id_b38
        .str.split('_', 2, expand=True) )
    loci['pos'] = loci['pos'].astype(int)
    #
    # Initial clustering -------------------------------------------------------
    # Produce statistics on how many loci there are pre/post clustering
    #
    # WARNING, this must be done on a df deduplicated on (study_id, chrom, pos)
    # as the process of mapping GWAS Catalog RSIDs to variant IDs can be one
    # to many, meaning that false independent loci will have been introduced
    loci_dedup = loci.drop_duplicates(subset=['study_id', 'chrom', 'pos'])
    logger.info('N associations after deduplication on study, chrom, pos: {0}'.format(loci_dedup.shape[0]))
    # Perform pre-clustering
    loci_initial_cluster = (
        loci_dedup.groupby('study_id')
        .apply(distance_clumping, dist=args.cluster_dist_kb)
        .reset_index()
    )
    logger.info('N associations after initial clustering: {0}'.format(loci_initial_cluster.shape[0]))
    # Calc number of loci per study
    pre_num = ( loci_dedup.groupby('study_id')
                .pos
                .count()
                .reset_index()
                .rename(columns={'pos': 'pre_clustering'}) )
    post_num = ( loci_initial_cluster.groupby('study_id')
                 .pos
                 .count()
                 .reset_index()
                 .rename(columns={'pos': 'post_clustering'}) )
    cluster_stats = pd.merge(pre_num, post_num)
    # Calculate stats on whether they will be clustered
    cluster_stats['proportion_multi'] = (
        ((cluster_stats.pre_clustering - cluster_stats.post_clustering) /
         cluster_stats.pre_clustering) )
    cluster_stats['to_cluster'] = (
        (cluster_stats.proportion_multi > args.cluster_multi_prop) &
        (cluster_stats.pre_clustering >= args.cluster_min_loci) )
    # cluster_stats.to_csv('cluster_stats.tsv', sep='\t', index=None)
    # Get list of studies for which clustering should be applied
    studies_to_cluster = cluster_stats.loc[cluster_stats.to_cluster, 'study_id']
    #
    # Main clustering ----------------------------------------------------------
    #
    # Split studies that require clustering from those that don't
    loci_no_cluster = loci.loc[~loci.study_id.isin(studies_to_cluster), :]
    loci_to_cluster = loci.loc[loci.study_id.isin(studies_to_cluster), :]
    logger.info('Clustering will be applied to N studies: {0}'.format(loci_to_cluster.study_id.nunique()))
    logger.info('Clustering will not be applied to N studies: {0}'.format(loci_no_cluster.study_id.nunique()))
    logger.info('Total N studies: {0}'.format(loci.study_id.nunique()))
    logger.info('Clustering will be applied to the following studies: {0}'.format(list(studies_to_cluster)))
    # Apply main distance clustering
    loci_to_clustered = (
        loci_to_cluster.groupby('study_id')
        .apply(distance_clumping, dist=args.cluster_dist_kb)
        .reset_index()
        .drop('level_1', axis=1)
    )
    # Concatenate clustered and none clustered
    final_loci = pd.concat([loci_no_cluster, loci_to_clustered])
    logger.info('Final num associations: {0}'.format(final_loci.shape[0]))
    # Checks: no studies lost, no columns gained/lost
    assert final_loci.study_id.nunique() == loci.study_id.nunique()
    assert final_loci.shape[1] == loci.shape[1]
    # Write output
    (
        final_loci.drop(['chrom', 'pos', 'alleles'], axis=1)
        .to_csv(args.outf, sep='\t', index=None)
    )
def distance_clumping(df, dist=500):
    """Distance-based clumping of associations around index SNPs.

    Args:
        df: pandas df in standard format
        dist: (kb) window either side of the index SNP

    Returns:
        pandas df reduced to one index row per cluster, with the study_id
        column removed.
    """
    # Most significant first (smallest exponent, then mantissa), one row
    # per position.
    df = df.sort_values(['pval_exponent', 'pval_mantissa']).drop_duplicates(subset='pos')
    window = dist * 1000  # kb -> bp
    # Assign each row to a cluster, seeding from the most significant
    # unclustered SNP each round.
    df['cluster'] = None
    cluster_id = 1
    remaining = df['cluster'].isnull()
    while remaining.any():
        index_row = df[remaining].iloc[0]
        nearby = (
            (df['chrom'] == index_row['chrom'])
            & (df['pos'] >= index_row['pos'] - window)
            & (df['pos'] <= index_row['pos'] + window)
            & remaining
        )
        df.loc[nearby, 'cluster'] = cluster_id
        cluster_id += 1
        remaining = df['cluster'].isnull()
    # Keep only the index SNP of each cluster; drop the bookkeeping columns.
    return (
        df.drop_duplicates(subset='cluster', keep='first')
        .drop(['study_id', 'cluster'], axis=1)
    )
def parse_args():
    """ Load command line args (all arguments are required). """
    parser = argparse.ArgumentParser()
    parser.add_argument('--inf', metavar="<str>", help=('Input'), type=str, required=True)
    parser.add_argument('--outf', metavar="<str>", help=("Output"), type=str, required=True)
    parser.add_argument('--log', metavar="<str>", help=("Log output"), type=str, required=True)
    parser.add_argument('--min_p', metavar="<float>", help=("Minimum p-value to be included"), type=float, required=True)
    parser.add_argument('--cluster_dist_kb', metavar="<int>", help=("± Distance in Kb"), type=int, required=True)
    parser.add_argument('--cluster_min_loci', metavar="<int>", help=("Minimum number of reported loci for that study to be included in the clustering analysis"), type=int, required=True)
    parser.add_argument('--cluster_multi_prop', metavar="<float>", help=("For a given study, if more than this proportion of loci are multi-signals (>1 signal within gwas_cat_cluster_dist_kb), the study will be clustered"), type=float, required=True)
    args = parser.parse_args()
    return args
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*-coding=utf-8-*-
def py3_case():
    """Fetch the caller's public IP through the MayiDaili proxy (Python 3).

    Signs the request per the MayiDaili MD5 scheme and sends it via a
    requests Session carrying the Proxy-Authorization header.
    """
    import hashlib
    import time
    import requests
    # NOTE(security): app key/secret are hard-coded; move them to config.
    my_app_key = "104595392"
    app_secret = "c978952ede1661bd5342b34ca0bf561e"
    # BUG FIX: the host previously included 'http://', so the proxy URL
    # below became 'http://http://...'.  The py2 variant in this file uses
    # the bare host, which is what the format string expects.
    mayi_url = 's5.proxy.mayidaili.com'
    mayi_port = '8123'
    # MayiDaili proxy server address
    mayi_proxy = {'http': 'http://{}:{}'.format(mayi_url, mayi_port)}
    # URL to fetch through the proxy
    url = 'http://members.3322.org/dyndns/getip'
    # Compute the signature: MD5(secret + 'app_key' + key + 'timestamp' + ts + secret)
    timesp = '{}'.format(time.strftime("%Y-%m-%d %H:%M:%S"))
    codes = app_secret + 'app_key' + my_app_key + 'timestamp' + timesp + app_secret
    sign = hashlib.md5(codes.encode('utf-8')).hexdigest().upper()
    # BUG FIX: the header contained '×tamp=' (a mangled '&times;tamp',
    # i.e. a broken HTML entity) instead of '&timestamp=', so the signed
    # timestamp parameter never reached the proxy.
    authHeader = 'MYH-AUTH-MD5 sign=' + sign + '&app_key=' + my_app_key + '&timestamp=' + timesp
    # Use a requests Session; install the auth header and proxy map once.
    s = requests.Session()
    s.headers.update({'Proxy-Authorization': authHeader})
    s.proxies.update(mayi_proxy)
    pg = s.get(url, timeout=(300, 270))  # tuple: (connect timeout, read timeout)
    # pg.encoding = 'GB18030'
    print(pg.text)
def py2_case():
    # Python 2 variant (urllib2, print statements); kept verbatim for legacy use.
    import hashlib
    import time
    import urllib2
    # Replace appkey and secret with your own
    appkey = "104595392"
    secret = "c978952ede1661bd5342b34ca0bf561e"
    paramMap = {
        "app_key": appkey,
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")  # adjust for timezone if running outside China
    }
    # Sort the parameter keys
    keys = paramMap.keys()
    keys.sort()
    codes = "%s%s%s" % (secret, str().join('%s%s' % (key, paramMap[key]) for key in keys), secret)
    # Compute the MD5 signature
    sign = hashlib.md5(codes).hexdigest().upper()
    paramMap["sign"] = sign
    # Assemble the Proxy-Authorization header value
    keys = paramMap.keys()
    authHeader = "MYH-AUTH-MD5 " + str('&').join('%s=%s' % (key, paramMap[key]) for key in keys)
    print authHeader
    # Access the target through the MayiDaili dynamic proxy
    proxy_handler = urllib2.ProxyHandler({"http": 'http://s5.proxy.mayidaili.com:8123'})
    opener = urllib2.build_opener(proxy_handler)
    request = urllib2.Request('http://members.3322.org/dyndns/getip')
    # Put authHeader into the request headers.  It must be recomputed for
    # every request, otherwise authentication fails from timestamp drift.
    request.add_header('Proxy-Authorization', authHeader)
    response = opener.open(request)
    print response.read()
py2_case()
# Tutorial script: progressively demonstrates that `self` is an ordinary
# parameter and that classes/instances can be patched at runtime.
class MyClass(object):
    def __init__(self, msg):
        self.msg = msg
    def say_stuff(self):
        print("Say stuff message is: {}\n\n".format(self.msg))
# normal call and instantiation
ob1 = MyClass("Hello there, first example.")
ob1.say_stuff()
# "self" really isn't that special -- any parameter name works
class MyClass(object):
    def __init__(her, msg):
        her.msg = msg
    def say_stuff(her):
        print("Say stuff message is: {}\n\n".format(her.msg))
# normal call and instantiation
ob1 = MyClass("Hello there, This is me.")
ob1.say_stuff()
# Rearrange the call to show relationship between class and instance explicitly.
say_stuff = MyClass.say_stuff
say_stuff(ob1)
# Place the instance and method in separate classes.
class MyClass(object):
    def say_stuff(self):
        print("Say stuff message is: {}\n\n".format(self.msg))
class OtherClass(object):
    def __init__(self, msg):
        self.msg = msg
other_ob = OtherClass("Hello there, folks. This is the other_ob instance")
MyClass.say_stuff(other_ob)
# Working without an init with attribute setting instead of another class.
class MyClass(object):
    def say_stuff(self):
        print("Say stuff message is: {}\n\n".format(self.msg))
ob = MyClass()
setattr(ob, "msg", "Here is a message from a set attribute")
ob.say_stuff()
# Patching both the class and its instance.
class MyClass(object):
    pass
ob = MyClass()
def say_stuff(self):
    print("The say stuff message here is: {}\n\n".format(self.msg))
setattr(MyClass, "say_stuff", say_stuff)
setattr(ob, "msg", "here is another patched in message to go with the patched in class")
ob.say_stuff()
# Do the same kind of patch job but with direct dictionary access instead (to the instance).
class MyClass(object):
    pass
ob = MyClass()
def say_stuff(self):
    print("The say stuff message here is: {}\n\n".format(self.msg))
setattr(MyClass, "say_stuff", say_stuff)
ob.__dict__["msg"] = "Here is a message that gets set via the instance dictionary"
ob.say_stuff()
# Note: you may NOT assign through a class's __dict__ directly because it is a
# read-only "mappingproxy"; use setattr on the class instead.
# Metaprogramming zone here????
# Let's now take a method and replace it with a new one.
class MyClass(object):
    def __init__(self, msg):
        self.msg = msg
    def say_stuff(self):
        print("My standard say stuff message is: {}\n\n".format(self.msg))
def say_other_stuff(self):
    print("The new version of say_stuff says it like this: {}\n\n".format(self.msg))
MyClass.say_stuff = say_other_stuff
ob = MyClass("This is my message here")
ob.say_stuff()
# Let's outright ignore a class method and make up our own.
class MyClass(object):
    def __init__(self, msg):
        self.msg = msg
    def say_stuff(self):
        print("My standard say stuff message is: {}\n\n".format(self.msg))
ob = MyClass("Here is the original message")
def say_stuff():
    m = "Surprise!!! You thought I was going to say something else, didn't you?"
    print(m)
#setattr(ob, "say_stuff", say_stuff)
# An instance-dict function is NOT bound, so it is called with no arguments.
ob.__dict__["say_stuff"] = say_stuff
ob.say_stuff()
# THE END
from container import *
from data_generator import *
class Decider:
    """Collects fermentation measurements over time and (eventually) will
    compare the observed trend against the expected fermentation program."""

    def __init__(self, fermentation, fermentation_file):
        self.fermentation_program = fermentation
        self.fermentation_file = fermentation_file
        # One parallel series per measured quantity, appended in lock-step.
        self.times = []
        self.tannins = []
        self.color = []
        self.density = []
        self.temperature = []

    def setNewData(self, new_time, tannins, color, density, temperature):
        """Record one sample of every measured quantity, then re-evaluate the trend."""
        for series, value in ((self.times, new_time),
                              (self.tannins, tannins),
                              (self.color, color),
                              (self.density, density),
                              (self.temperature, temperature)):
            series.append(value)
        self.calculateTrend()

    def calculateTrend(self):
        """Placeholder for trend analysis.

        TODO: calculate trend from new data and from expected data and compare
        https://stackoverflow.com/questions/3949226/calculating-pearson-correlation-and-significance-in-python
        https://www.mathworks.com/help/matlab/ref/polyfit.html
        """
        print('does nothing at the moment')
|
import time
from flask import Flask, jsonify, request
from models.movies import Movies
from markupsafe import escape
from flask_cors import CORS
from clickhouse_driver.client import Client
import json
from models.database import DataBasa
# Flask application with CORS enabled for every route.
app = Flask(__name__)
CORS(app)
@app.route('/test')
def get_cur_time():
    """Health-check endpoint: runs `select 1` against the local ClickHouse."""
    ch = Client('127.0.0.1')
    return {'databasa': ch.execute('select 1')}
@app.route('/top25')
def top25():
    """The 25 most-watched assets (eventtype 31; presumably a watch event)."""
    ch = Client('127.0.0.1')
    rows = ch.execute('SELECT assetid, arrayElement(groupArray(title), 1) as title, count() AS cnt_watch FROM events WHERE eventtype = 31 GROUP BY assetid ORDER BY cnt_watch DESC LIMIT 25')
    return {'top25': rows}
@app.route('/boevik')
def boevik():
    """For every known genre, return its five most-popular assets."""
    ch = Client('127.0.0.1')
    # Genre names are a fixed list (not user input), so interpolation is safe here.
    genres = ['Боевики', 'Вестерны', "Детективы", "Для детей", 'Комедии', "Мелодрамы", "Мультфильмы", "Приключения", "Спорт", "Триллеры", "Ужасы", "Фантастика"]
    top_by_genre = {}
    for genre in genres:
        top_by_genre[genre] = ch.execute(f"""SELECT assetid, arrayElement(groupArray(title), 1), count() as cnt FROM events WHERE has(splitByChar(',', genretitles), '{genre}') = 1 and (eventtype = 31 or eventtype = 15) GROUP BY assetid ORDER BY cnt DESC LIMIT 5""")
    # Response key kept as-is for API compatibility.
    return {'fuckinAll': top_by_genre}
@app.route('/similar/<int:id>')
def similar(id):
    """Up to five (assetid, title) pairs similar to the given asset."""
    ch = Client('127.0.0.1')
    results = []
    # similar_assetids is a comma-separated string of asset ids.
    sim_ids = ch.execute(f"""SELECT similar_assetids FROM similarfilms WHERE assetid = {id}""")[0][0].split(',')
    for sim_id in sim_ids:
        rows = ch.execute(f"""select assetid, arrayElement(groupArray(title), 1) from events where assetid = {sim_id} group by assetid""")
        if not rows:
            # Unknown asset id -- skip it, same as the old IndexError handler.
            continue
        results.append(rows[0])
        if len(results) == 5:
            break
    # Response key kept as-is for API compatibility.
    return {'Fuckinall': results}
@app.route('/create/<int:id>+<int:views>')
def create(id, views):
    """Persist an (id, views) record and echo the current timestamp."""
    DataBasa().add_shit(id, views)
    return {'time': time.time()}
@app.route('/request/<int:id>')
def request(id):
    """Bucket an id's view count into 0 (none), 1 (<25) or 2 (>=25).

    NOTE(review): this function name shadows flask.request imported above --
    confirm nothing in this module relies on flask's request object.
    """
    db = DataBasa()
    res = db.get_by_id(id)
    if res is None:
        # First time we see this id: create it with zero views.
        db.add_shit(id, 0)
        res = 0
    if res == 0:
        message = 0
    elif res < 25:
        message = 1
    else:
        message = 2
    return {'res': message}
@app.route('/add_like/<int:movie_id>')
def create_add_like(movie_id):
    """Stub like-endpoint: currently only returns the current timestamp."""
    return {'time': time.time()}
@app.route('/get_movie/<int:id>')
def get_movie(id):
    """Fetch a single movie record by id (returned under the 'time' key,
    kept as-is for API compatibility)."""
    movi = Movies().get_by_id(id)
    return {'time': movi}
@app.route('/get_boeviks')
def get_boevik():
    """Return the first five movies from the movie store."""
    return {'boeviks': Movies().get_first_five()}
@app.route('/send_likes/<string:likes>')
def add_message(likes):
    """Top-25 recommendations for a comma-separated list of liked asset ids.

    NOTE(review): `likes` comes straight from the URL and is interpolated
    into SQL unescaped -- injection risk; confirm upstream validation.
    """
    ch = Client('127.0.0.1')
    recommendations = []
    top_ids = ch.execute(f"""SELECT topK(25)(arrayJoin(sim)) from (select d, arrayFlatten(groupArray(sim)) as sim from (select splitByChar(',', similar_assetids) as sim, today() as d from similarfilms where assetid in ({likes})) group by d)""")
    for sim_id in top_ids[0][0]:
        rows = ch.execute(f"""select assetid, arrayElement(groupArray(title), 1) from events where assetid = {sim_id} group by assetid""")
        if not rows:
            continue
        recommendations.append(rows[0])
        if len(recommendations) == 25:
            break
    # Response key (typo included) kept as-is for API compatibility.
    return {'custoized' : recommendations}
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class GblToolfile(Package):
    """Spack package that writes the SCRAM tool-file (gbl.xml) describing
    where the `gbl` dependency is installed."""
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('gbl')
    def install(self, spec, prefix):
        """Render the tool-file template with gbl's version/prefix and emit it."""
        # $VER/$PFX are substituted by write_scram_toolfile; $$NAME presumably
        # escapes a literal $NAME (string.Template rules) -- TODO confirm in
        # the shared scrampackage helper.
        values = {}
        values['VER'] = spec['gbl'].version
        values['PFX'] = spec['gbl'].prefix
        fname = 'gbl.xml'
        contents = str("""<tool name="gbl" version="$VER">
  <lib name="GBL"/>
  <client>
    <environment name="GBL_BASE" default="$PFX"/>
    <environment name="INCLUDE" default="$$GBL_BASE/include"/>
    <environment name="LIBDIR" default="$$GBL_BASE/lib"/>
  </client>
  <use name="eigen"/>
</tool>""")
        write_scram_toolfile(contents, values, fname, prefix)
|
# Placeholder script: prints a greeting.
print("hello open source")
class IntegrationPoint(object):
    """Entity used to carry data related to each integration point."""

    def __init__(self, tria, n1, n2, n3, f1, f2, f3, nx, ny, nz, le):
        # Position: barycentric combination of the three corner nodes.
        self.xyz = f1 * n1.xyz + f2 * n2.xyz + f3 * n3.xyz
        # Owning triangle and its corner nodes.
        self.tria = tria
        self.n1, self.n2, self.n3 = n1, n2, n3
        # Barycentric weights.
        self.f1, self.f2, self.f3 = f1, f2, f3
        # Normal components (nz covers the 3D case).
        self.nx, self.ny, self.nz = nx, ny, nz
        # Length of the line where the integration point lies on.
        self.le = le
class Edge(object):
    """Mesh edge between two nodes, with book-keeping for adjacent triangles,
    sub-domain membership and integration points."""

    def __init__(self, n1, n2):
        self.n1 = n1
        self.n2 = n2
        self.nodes = [n1, n2]
        self.node_ids = [n1.nid, n2.nid]
        self.trias = []        # adjacent triangles
        self.sdomain = []      # sub-domain membership
        self.ipts = []         # integration points lying on this edge
        self.Ac = None
        self.othernode1 = None
        self.othernode2 = None

    def __str__(self):
        return 'Edge (%s, %s)' % (self.n1.nid, self.n2.nid)

    def __repr__(self):
        return self.__str__()

    def getMid(self):
        """Return the edge midpoint.

        Works both when the node objects support `+` directly and when the
        coordinates live on a `.xyz` attribute. The previous bare `except:`
        swallowed *every* exception (including typos and KeyboardInterrupt);
        only the two expected failure modes are caught now.
        """
        try:
            return 0.5 * (self.n1 + self.n2)
        except (TypeError, AttributeError):
            return 0.5 * (self.n1.xyz + self.n2.xyz)
# Generated by Django 2.2.1 on 2019-05-19 20:32
from django.db import migrations
import django_countries.fields
import localflavor.us.models
class Migration(migrations.Migration):
    """Alter Event.event_country / Event.event_state to optional
    (blank=True, null=True) country/US-state fields."""

    dependencies = [
        ('events', '0010_auto_20190519_2021'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='event_country',
            field=django_countries.fields.CountryField(blank=True, max_length=2, null=True),
        ),
        migrations.AlterField(
            model_name='event',
            name='event_state',
            field=localflavor.us.models.USStateField(blank=True, max_length=2, null=True, verbose_name='Event state (US only)'),
        ),
    ]
|
from flask import abort
from knowledge_graph.Mind import Mind
from knowledge_graph import app
class BaseConfig(object):
    """Shared configuration defaults for all profiles."""
    DEBUG = False
class DevelopmentConfig(BaseConfig):
    """Development profile; also eagerly builds the Mind knowledge graph.

    NOTE(review): the try/except below runs at class-definition (import)
    time, and flask.abort(404) outside a request context raises instead of
    producing a response -- confirm this is the intended failure mode when
    the Mind data cannot be loaded.
    """
    DEBUG = True
    TESTING = True
    try:
        app.config["MIND"] = Mind()
    except (FileNotFoundError, IsADirectoryError, ValueError):
        abort(404)
class TestingConfig(BaseConfig):
    """Testing profile (no Mind instance wired up yet)."""
    DEBUG = False
    TESTING = True
    # todo: add mock mind here
from string import maketrans, translate
def correct(s):
    """Fix common OCR digit/letter confusions: '5'->'S', '0'->'O', '1'->'I'.

    Rewritten for Python 3 compatibility: `string.maketrans` and
    `string.translate` were removed from the `string` module; the
    equivalents are `str.maketrans` and the `str.translate` method.
    """
    return s.translate(str.maketrans('501', 'SOI'))
|
class Demo2:
    """Demonstrates the three method flavours: static, class and instance."""

    @staticmethod  # decorator
    def sample1():
        """Static method: receives neither the class nor an instance."""
        print("I am Static Method")

    @classmethod  # decorator
    def sample2(cls):
        """Class method: receives the class object itself."""
        print(" I am Class method", cls)

    def sample3(self):  # instance method
        """Instance method: receives the instance."""
        print(" I am Instance Method", self)
#-----------------------
# Calling the static method directly on the class (no instance needed).
Demo2.sample1()
# Calling the class method; Python passes Demo2 as `cls`.
Demo2.sample2()
# Calling the instance method on a throw-away object.
Demo2().sample3()
# Calling the instance method via an object reference variable.
d1 = Demo2()
d1.sample3()
|
from keras.preprocessing.image import ImageDataGenerator
import keras.applications as keras_applications
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
import pandas as pd
import sys
from timeit import default_timer as timer
from keras import backend as K
from keras.callbacks import CSVLogger
import json
from sklearn.metrics import confusion_matrix
import argparse
# from colorama import Fore, Style, init
module_name = "mlsymmetric"
# CLI: which model to evaluate, where the experiment lives, and the class list.
parser = argparse.ArgumentParser(description = "mlsymmetric")
parser.add_argument('model_num',
                    metavar = 'MODEL_NUMBER', type=int,
                    help='Model number')
parser.add_argument('exp_name',
                    metavar = 'EXP_NAME', type=str,
                    help='Name of experiment directory')
parser.add_argument('dataset_name',
                    metavar = 'DATASET_NAME', type=str,
                    help='Dataset directory')
parser.add_argument('classes',
                    metavar = 'CLASSES', type=str,
                    help='List of classes')
parser.add_argument('-ver', '--version',
                    action='version', version="1.0",
                    help='Display version information and dependencies.')
parser.add_argument('-nocol', '--nocolor',
                    action='store_true', default = False,
                    help='Disables color in terminal')
# -q and -v are mutually exclusive verbosity switches.
detail = parser.add_mutually_exclusive_group()
detail.add_argument('-q', '--quiet',
                    action='store_true',
                    help='Print quiet')
detail.add_argument('-v', '--verbose',
                    action='store_true',
                    help='Print verbose')
args = parser.parse_args()
rootoutput='outputs/'
rootdataset='dataset/'
# CLI model number is 1-based; the internal registry index is 0-based.
model_num = args.model_num - 1
exp_name = args.exp_name
dataset_name = args.dataset_name
class_list = args.classes.split(',')
n_classes = len(class_list)
# Directory layout: dataset/<name>/test/ and outputs/<exp>/models/.
test_path = rootdataset + dataset_name + "/test/"
checkpoint_dir = rootoutput + exp_name + "/models/"
'''
if not args.nocolor:
    init()
'''
if not args.quiet:
    print("Test Path:", test_path)
    print("Checkpoint Directory:", checkpoint_dir)
    print("Classes:", class_list)
start = timer()
# When is this used? NOTE(review): appears unused in this script.
calculatepercentage = 0
# Grayscale 200x200 input images (channels-last).
input_shape = (200,200,1)
img_width, img_height = 200, 200
V_batch_size=32
# Model registry: `names` and `models` MUST stay index-aligned --
# model_num selects the same position from both lists.
names = [
    'ResNet50',
    'MobileNet',
    'MobileNetV2',
    'NASNetMobile',
    'NASNetLarge',
    'VGG16',
    'VGG19',
    'Xception',
    'InceptionResNetV2',
    'DenseNet121',
    'DenseNet201'
]
models = [
    keras_applications.ResNet50,
    keras_applications.MobileNet,
    keras_applications.MobileNetV2,
    keras_applications.NASNetMobile,
    keras_applications.NASNetLarge,
    keras_applications.VGG16,
    keras_applications.VGG19,
    keras_applications.Xception,
    keras_applications.InceptionResNetV2,
    keras_applications.DenseNet121,
    keras_applications.DenseNet201
]
model_name = str(model_num)+ "_" + names[model_num]
if not args.quiet:
    print("Model:", model_name)
# Build the architecture from scratch (weights=None) and load the
# experiment's best checkpoint on top.
model = models[model_num](weights = None, input_shape = input_shape, classes = n_classes)
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
weightfile = checkpoint_dir + model_name + '_checkpoint.best.hdf5'
if not args.quiet:
    print("loading model", weightfile)
model.load_weights(weightfile)
# When is wild_datagen used? NOTE(review): appears unused below.
wild_datagen = ImageDataGenerator(rescale = 1. / 255)
test_datagen = ImageDataGenerator(rescale = 1. / 255)
# shuffle=False so predictions stay aligned with test_generator.filenames.
test_generator = test_datagen.flow_from_directory(test_path, classes = class_list,
        target_size = (img_width, img_height), batch_size = V_batch_size, shuffle = False,
        class_mode = 'categorical', color_mode = "grayscale")
steps = np.ceil(test_generator.samples / V_batch_size)
# NOTE(review): predict_generator is deprecated in modern Keras (use predict).
Y_pred = model.predict_generator(test_generator, steps = steps)
acclasses = test_generator.classes[test_generator.index_array]
y_pred = np.argmax(Y_pred, axis = -1)
print(model_name, "acc percentage", sum(y_pred == acclasses)/len(Y_pred))
if not args.quiet:
    print(confusion_matrix(acclasses, y_pred))
# Export per-file predicted vs. actual class to CSV.
file_names = np.array(test_generator.filenames)
name_nums = np.zeros(file_names.size, dtype = [('names', 'U30'), ('y_pred', int), ('acclasses', int)])
name_nums['names'] = file_names
name_nums['y_pred'] = y_pred
# Add Y_Pred
name_nums['acclasses'] = acclasses
# Bug fix: with a single multi-spec format string, np.savetxt ignores the
# `delimiter` argument, so fmt="%s%i%i" ran the three columns together
# with no separators. The commas must be spelled inside fmt itself.
np.savetxt('name_pred_acc.csv', name_nums, delimiter = ',', header = "File Name,P. Class,A. Class", fmt = "%s,%i,%i")
print("name_pred_acc.csv has been created.")
|
import xml.etree.ElementTree as ET
import requests
import re
def get_rating_by_isbn(isbn):
    """Look up a book on the Goodreads XML API by ISBN and return its metadata.

    Returns a dict of title/author/rating fields built from the FIRST <book>
    element in the response (the return statement sits inside the loop).
    NOTE(review): the API key is hard-coded -- move it to configuration.
    NOTE(review): if the response contains no <book> element the function
    implicitly returns None -- callers must handle that.
    """
    key = "1emC5V4L1aQXNtWBo7SpKw"
    response = requests.get('https://www.goodreads.com/book/isbn/'+isbn+'?key='+key)
    root = ET.fromstring(response.content)
    for book in root.findall('book'):
        # Pull the scalar fields straight off the XML tree; `isbn` is
        # deliberately rebound to the value reported by the API.
        title = book.find('title').text
        isbn = book.find('isbn').text
        image_url = book.find('image_url').text
        small_image_url = book.find('small_image_url').text
        description = book.find('description').text
        publication_year = book.find('publication_year').text
        publisher = book.find('publisher').text
        average_rating =book.find('average_rating').text
        ratings_count = book.find('ratings_count').text
        author = book.find('authors').find('author').find('name').text
        book_object = {
            "title" : title,
            "isbn10" : isbn,
            "image_url" : image_url,
            "small_image_url" : small_image_url,
            "publisher" : publisher,
            "description" : description,
            "publication_year": publication_year,
            "average_rating" : average_rating,
            "ratings_count" : ratings_count,
            "author" : author,
        }
        # Debug output; consider logging instead.
        print(book_object)
        return(book_object)
from django import forms
from .models import Auction
from datetime import timedelta
from django.utils import timezone
from django.core.cache import cache
# Choices for the auction duration, as (minutes, label) pairs.
DURATION_CHOICES = (
    (1, "1 minute"),
    (5, "5 minutes"),
    (30, "30 minutes"),
    (60, "1 hour"),
    (1440, "1 day")
)
class CreateAuctionForm(forms.ModelForm):
    """Form used to create a new auction, with a user-selected duration."""

    duration = forms.ChoiceField(choices=DURATION_CHOICES)

    class Meta:
        model = Auction
        fields = ['asset_title', 'description', 'entry_price']

    def clean_entry_price(self):
        """Validate the starting price of the item to be auctioned."""
        price = self.cleaned_data['entry_price']
        if price >= 1:
            return price
        raise forms.ValidationError("You must type at least 1$ entry price for the auction")
class BidAuctionForm(forms.ModelForm):
    """Form used to place a new bid on an existing auction."""

    class Meta:
        model = Auction
        fields = ['bid']

    def clean_bid(self):
        """Validate the new offer: it must beat the current best price.

        The cache entry for an auction id holds the running bid state; its
        second element is the current best offer (falls back to the
        auction's entry price when nothing is cached yet).
        """
        bid = self.cleaned_data['bid']
        auction = Auction.objects.get(id=self.instance.id)
        # Fetch the cache entry ONCE: the original called cache.get() twice,
        # so a concurrent bid landing between the two calls could make the
        # None-check and the [1] access observe different values.
        cached = cache.get(f"{auction.id}")
        old = auction.entry_price if cached is None else cached[1]
        if bid <= old:
            raise forms.ValidationError(f"You have to offer more than {old}")
        return bid
|
#!/bin/python3
# Snakemake entry point: pulls in the alignment/mapping QC rules.
# Including snakefile for mapping pipeline steps
include: 'snakefiles/alignment_mapping_qc.snake'
|
# FizzBuzz variant over 1..50: multiples of 3 -> 'fast', of 5 -> 'campus'.
for i in range(1, 50+1):
    if i % 3 == 0:
        print('fast')
    elif i % 5 == 0:
        # Bug fix: `i &5==0` parsed as `i & (5 == 0)` == `i & 0` == 0, which
        # is always falsy, so 'campus' was never printed. The intended test
        # is divisibility, i.e. the modulo operator.
        print('campus')
    else:
        print('{}'.format(i))
|
from . import EncodeTransform, ToTensorTransform |
import logging
import zipfile
from Acquisition import aq_inner
from five import grok
from zope.component import getMultiAdapter
from Products.CMFPlone.interfaces import IPloneSiteRoot
from AccessControl import Unauthorized
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.decode import processInputs
from Products.statusmessages.interfaces import IStatusMessage
from sc.newsletter.creator import MessageFactory as _
from sc.newsletter.creator.utils import extractResourceName
from sc.newsletter.creator.utils import getOrCreatePersistentResourceDirectory
from sc.newsletter.creator.config import NEWSLETTER_RESOURCE_NAME, MANIFEST_FORMAT
logger = logging.getLogger('sc.newsletter.creator')
class View(grok.View):
    """Control-panel view for importing newsletter themes from a Zip archive.

    Registered on the Plone site root; requires the ManagePortal permission.
    """
    grok.context(IPloneSiteRoot)
    grok.require('cmf.ManagePortal')
    grok.name('newslettercreator-controlpanel')

    def __init__(self, *args, **kwargs):
        super(View, self).__init__(*args, **kwargs)
        self.context = aq_inner(self.context)
        # Cache the commonly-used portal helpers for the template/handlers.
        self.portal_state = getMultiAdapter((self.context, self.request),
                                            name=u'plone_portal_state')
        self.plone_tools = getMultiAdapter((self.context, self.request),
                                           name=u'plone_tools')
        self.catalog = self.plone_tools.catalog()

    def update(self):
        """Handle the control-panel form: Cancel, or Import a theme Zip.

        Returns False after the Cancel redirect, True otherwise; validation
        problems are collected in self.errors for the template to render.
        """
        processInputs(self.request)
        self.errors = {}
        submitted = False
        form = self.request.form
        if 'form.button.Cancel' in form:
            self.redirect(_(u"Changes canceled."))
            return False
        if 'form.button.Import' in form:
            # CSRF check before acting on the posted data.
            self.authorize()
            submitted = True
            replaceExisting = form.get('replaceExisting', False)
            newslettercreatorArchive = form.get('newslettercreatorArchive', None)
            newslettercreatorZip = None
            performImport = False
            try:
                newslettercreatorZip = zipfile.ZipFile(newslettercreatorArchive)
            except (zipfile.BadZipfile, zipfile.LargeZipFile,):
                logger.exception("Could not read zip file")
                self.errors['newslettercreatorArchive'] = _('error_invalid_zip',
                    default=u"The uploaded file is not a valid Zip archive"
                )
            if newslettercreatorZip:
                resourceName = extractResourceName(newslettercreatorZip)
                newslettercreatorContainer = getOrCreatePersistentResourceDirectory()
                newslettercreatorExists = resourceName in newslettercreatorContainer
                if newslettercreatorExists:
                    if not replaceExisting:
                        # Refuse to overwrite unless explicitly requested.
                        self.errors['newslettercreatorArchive'] = _('error_already_installed',
                            u"This newsletter theme is already installed. Select 'Replace existing newsletter theme' and re-upload to replace it."
                        )
                    else:
                        del newslettercreatorContainer[resourceName]
                        performImport = True
                else:
                    performImport = True
                if performImport:
                    newslettercreatorContainer.importZip(newslettercreatorZip)
        if submitted and not self.errors:
            IStatusMessage(self.request).add(_(u"Changes saved"))
        elif submitted:
            IStatusMessage(self.request).add(_(u"There were errors"), 'error')
        return True

    def authorize(self):
        """Verify the CSRF authenticator token; raise Unauthorized if invalid."""
        authenticator = getMultiAdapter((self.context, self.request), name=u"authenticator")
        if not authenticator.verify():
            raise Unauthorized

    def redirect(self, message):
        """Queue a status message and redirect back to the control panel."""
        IStatusMessage(self.request).add(message)
        portalUrl = getToolByName(self.context, 'portal_url')()
        self.request.response.redirect("%s/plone_control_panel" % portalUrl)
|
from django.http import HttpResponse
from django.shortcuts import render
def homepage(request):
    """Render the landing page with a sample context variable."""
    context = {'hithere': 'hero'}
    return render(request, 'home.html', context)
def aj(request):
    """Plain-text smoke-test endpoint."""
    return HttpResponse('Hello Hero ;)')
def count(request):
    """Count word occurrences in the submitted `text` query parameter.

    Renders count.html with the original text, the total word count and the
    per-word frequency mapping.
    """
    fulltext = request.GET['text']
    wordlist = fulltext.split()
    worddict = dict()
    for word in wordlist:
        # dict.get with a default replaces the membership-test/else branch:
        # one lookup instead of two, same resulting counts.
        worddict[word] = worddict.get(word, 0) + 1
    return render(request, 'count.html',
                  {'atext': fulltext, 'count': len(wordlist), 'words': worddict.items()})
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
from aws_cdk import (
aws_ec2 as ec2,
aws_elasticloadbalancingv2 as elbv2,
core
)
class LoadBalancer(core.Construct):
    """CDK construct: an internet-facing ALB with an HTTP target group
    containing the supplied EC2 instances."""

    @property
    def alb(self):
        """The underlying ApplicationLoadBalancer resource."""
        return self._alb

    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, security_group: ec2.ISecurityGroup, instances: list, **kwargs):
        super().__init__(scope, id, **kwargs)
        # HTTP target group; every supplied instance is registered on port 80.
        self._tg = elbv2.ApplicationTargetGroup(
            self, "TG",
            port=80,
            vpc=vpc,
        )
        for instance in instances:
            self._tg.add_target(
                elbv2.InstanceTarget(
                    instance.instance_id,
                    port=80
                )
            )
        # Internet-facing ALB fronting the target group.
        self._alb = elbv2.ApplicationLoadBalancer(
            self, "LB",
            vpc=vpc,
            internet_facing=True,
            security_group=security_group
        )
        # The listener registers itself on the ALB; the previous unused
        # `listener = ...` local binding was dropped.
        self._alb.add_listener("Listener", port=80, default_target_groups=[self._tg])
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The above encoding declaration is required and the file must be saved as UTF-8
################################################################################
# Практичне завдання № 3.1
# Вхідні дані: 3 дійсних числа a, b, c. Передаються в програму як аргументи командного рядка.
# Результат роботи: рядок "triangle", якщо можуть існувати відрізки з такою довжиною
# та з них можна скласти трикутник, або "not triangle" -- якщо ні.
# Наприклад
# Вхідні дані: 10 20 30
# Приклад виклику: python lab3_1.py 10 20 30
# Результат: not triangle
# Вхідні дані: 1 1 1
# Приклад виклику: python lab3_1.py 1 1 1
# Результат: triangle
# Вхідні дані: 5.5 5.5 -2
# Приклад виклику: python lab3_1.py 5.5 5.5 -2
# Результат: not triangle
import sys
a = float(sys.argv[1])
b = float(sys.argv[2])
c = float(sys.argv[3])
if a <= 0 or b <= 0 or c <= 0:
# print "Не правильно введені сторони трикутника, вони мають бути > 0"
print "not triangle"
sys.exit()
if (a*a + b*b == c*c) or (a*a + c*c == b*b) or (a == b == c):
print "triangle"
else:
print "not triangle"
|
from bs4 import BeautifulSoup
import lxml
import requests
# Scrape Empire's "best movies" list and write it to a text file,
# last-listed title first.
url = 'https://www.empireonline.com/movies/features/best-movies-2/'
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
movies = soup.find_all(name='h3', class_='title')
# Fixes vs. the original: `my_movies = movies` was a plain alias (not a
# copy), so the in-place reverse() silently mutated `movies` too; reversed()
# leaves the scraped list intact. ''.join avoids quadratic string +=.
output = ''.join(f'{movie.getText()}\n' for movie in reversed(movies))
with open("100 Best Movies.txt", "w") as movie_handle:
    # write(), not writelines(): output is one string, not a list of lines.
    movie_handle.write(output)
|
from django.shortcuts import render, get_object_or_404
from django.views import generic
from . import models
class TagMixin(object):
    """View mixin that injects the full tag list into the template context."""

    def get_tags(self):
        return models.Tag.objects.all()

    def get_context_data(self, **kwargs):
        context = super(TagMixin, self).get_context_data(**kwargs)
        context.update(tags=self.get_tags())
        return context
class BlogIndex(TagMixin, generic.ListView):
    """Paginated list of published posts."""
    queryset = models.Post.objects.published()
    template_name = "blog/index.html"
    paginate_by = 3
class BlogDetail(TagMixin, generic.DetailView):
    """Single-post detail page."""
    model = models.Post
    template_name = "blog/post.html"
class TagIndex(TagMixin, generic.ListView):
    """Paginated list of posts carrying the tag given by the URL slug."""
    template_name = "blog/index.html"
    paginate_by = 5

    def get_queryset(self):
        # get_object_or_404 (already imported at module top) turns an unknown
        # slug into a 404 instead of an unhandled Tag.DoesNotExist (HTTP 500).
        tag = get_object_or_404(models.Tag, slug=self.kwargs['slug'])
        return models.Post.objects.filter(tags=tag)
|
'''
Author: Jonathan Ek
Last updated: 2017-11-12
Description: Sudoku solver
'''
'''Todo: The program can list all possible numbers of any one square. It can also update these numbers after
a number has been placed. It is now necessary to identify a unique number for each square, i.e. solve the puzzle.
'''
# The puzzle, row by row; '0' marks an empty square (9x9 = 81 characters).
squareContents = '200004000900007041600500200057600080000000000030008760001005008520800004000400002'
assert len(squareContents) == 81
# Square names are row letter + column digit, e.g. 'A1' .. 'I9'.
rows = 'ABCDEFGHI'
columns = '123456789'
def crossLists(firstList, secondList):
    """Return the concatenation a+b for every pair drawn from the two sequences."""
    result = []
    for a in firstList:
        for b in secondList:
            result.append(a + b)
    return result
def unitsContainingSquare(square, unitList):
    """For every unit containing *square*, return a copy of that unit with
    *square* itself removed (i.e. the square's peers, grouped per unit)."""
    peers = []
    for unit in unitList:
        if square in unit:
            remainder = unit[:]
            remainder.remove(square)
            peers.append(remainder)
    return peers
def identifyImpossibleDigits(gameBoard, unitsOfSquare):
    """Collect every already-placed digit (board value > 0) found in the
    given units; these digits cannot be placed in the square itself."""
    placed = []
    for unit in unitsOfSquare:
        for square in unit:
            digit = gameBoard[square]
            if int(digit) > 0:
                placed.append(digit)
    return placed
def updateImpossibleDigitsDictionary(gameBoard, unitsOfSquare):
    """Collect every solved digit (exactly one candidate left) in the given units."""
    solved = []
    for unit in unitsOfSquare:
        solved.extend(gameBoard[square] for square in unit
                      if len(gameBoard[square]) == 1)
    return solved
def identifyVersatileSquares(listOfSquares, gameBoard):
    """Return the squares whose content is still the unresolved marker '0'
    (i.e. squares with several candidate digits)."""
    return [square for square in listOfSquares if gameBoard[square] == '0']
def removeImpossibleDigitsFromBoard(gameBoard, impossibleDigitsDictionary):
    """Strip every impossible digit from each square's candidate string.

    Mutates *gameBoard* in place and returns it. str.replace is a no-op when
    the digit is absent, so no membership pre-check is needed.
    """
    for square in sorted(impossibleDigitsDictionary):
        for digit in impossibleDigitsDictionary[square]:
            gameBoard[square] = gameBoard[square].replace(digit, "")
    return gameBoard
def updateVersatileSquares(gameBoard, versatileSquares):
    """Drop squares that are no longer versatile (single candidate left).

    Mutates *versatileSquares* in place and returns it.

    Bug fix: the original removed items from the list while iterating over
    the same list, which makes the iterator skip the element immediately
    after each removal, leaving freshly-solved squares behind. Iterating a
    shallow copy makes every element get inspected.
    """
    for square in versatileSquares[:]:
        if len(gameBoard[square]) == 1:
            versatileSquares.remove(square)
    return versatileSquares
def solvePuzzle(gameBoard, listOfVersatileSquares, unitDictionary):
    """Stub solver: currently just prints each still-unsolved square name."""
    for square in listOfVersatileSquares:
        print(square)
# NOTE(review): `lst` appears unused below.
lst = [1,2,3,4,5]
# All 27 units: 9 rows, 9 columns and 9 3x3 boxes.
listOfUnits = [crossLists(row,columns) for row in rows] + [crossLists(rows,col) for col in columns] + [crossLists(a,b)
                                                                                                      for a in ['ABC','DEF','GHI']
                                                                                                      for b in ['123','456','789']]
listOfSquares = crossLists(rows,columns)
# Board: square name -> content character from the puzzle string.
board = {listOfSquares[b]:squareContents[b] for b in range(0,len(squareContents))}
versatileSquares = identifyVersatileSquares(listOfSquares,board)
# Per-square peer units, then the digits each open square cannot take.
unitsDict = {square:unitsContainingSquare(square,listOfUnits) for square in sorted(listOfSquares)}
impossibleDigitsDictionary = {square:identifyImpossibleDigits(board,unitsDict[square]) for square in versatileSquares}
# Expand every empty square to the full candidate string before pruning.
for j in sorted(board):
    if board[j] == '0':
        board[j] = '123456789'
board = removeImpossibleDigitsFromBoard(board,impossibleDigitsDictionary)
# Second constraint-propagation pass using freshly-solved squares.
versatileSquares = updateVersatileSquares(board,versatileSquares)
impossibleDigitsDictionary = {square:updateImpossibleDigitsDictionary(board,unitsDict[square]) for square in versatileSquares}
board = removeImpossibleDigitsFromBoard(board,impossibleDigitsDictionary)
#for j in sorted(board):
#    print(j,board[j])
|
import numpy as np
import math
from typing import List
from tensorflow.keras import backend as K
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.utils import get_file, get_source_inputs
from keras_applications.imagenet_utils import _obtain_input_shape
# For build pip, running out of range of this folder
# from .custom_objects import ConvInitializer, DenseInitializer, Swish, DropConnect
# from .config import BlockArgs, get_default_block_list
# For not build pip, running this file
from custom_objects import ConvInitializer, DenseInitializer, Swish, DropConnect
from config import BlockArgs, get_default_block_list
import os
def round_filters(filters, width_coefficient, depth_divisor, min_depth):
    '''Round number of filters based on the width multiplier, snapping the
    scaled value to a multiple of depth_divisor (never below min_depth).'''
    multiplier = float(width_coefficient)
    divisor = int(depth_divisor)
    if not multiplier:
        # No scaling requested -- keep the raw filter count.
        return filters
    scaled = filters * multiplier
    floor = min_depth or divisor
    # Nearest multiple of `divisor`, but never below `floor`.
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def round_repeats(repeats, depth_coefficient):
    '''Scale the number of block repeats by the depth multiplier (ceiling);
    a falsy coefficient leaves the count untouched.'''
    if not depth_coefficient:
        return repeats
    return int(math.ceil(depth_coefficient * repeats))
def SEBlock(input_filters, se_ratio, expand_ratio, data_format=None):
    '''Implement Squeeze and Excitation block.

    Returns a closure that, applied to a tensor, computes per-channel
    attention weights (global average pool -> bottleneck conv -> Swish ->
    expand conv with sigmoid) and scales the input by them.
    '''
    if data_format is None:
        data_format = K.image_data_format()
    # Bottleneck width for the squeeze step (at least one channel).
    num_reduced_filters = max(1, int(input_filters * se_ratio))
    filters = input_filters * expand_ratio
    if data_format == 'channels_first':
        chan_dim = 1
        spatial_dims = [2, 3]
    else:
        chan_dim = -1
        spatial_dims = [1, 2]
    # NOTE(review): chan_dim is computed but unused in this block.
    def block(inputs):
        x = inputs
        # Squeeze phase: global average pool over the spatial dimensions.
        x = layers.Lambda(lambda t:
                          K.mean(t, axis=spatial_dims, keepdims=True))(x)
        x = layers.Conv2D(num_reduced_filters, (1, 1),
                          strides=(1, 1),
                          padding='same',
                          kernel_initializer=ConvInitializer())(x)
        x = Swish()(x)
        # Excitation phase: expand back and squash to (0, 1) gates.
        x = layers.Conv2D(filters, (1, 1),
                          strides=(1, 1),
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer=ConvInitializer())(x)
        # Scale the inputs by the learned per-channel sigmoid gate.
        out = layers.Multiply()([x, inputs])
        return out
    return block
def MBConvBlock(input_filters, output_filters,
                kernel_size, strides,
                expand_ratio, se_ratio,
                id_skip, drop_connect_rate,
                batch_norm_momentum=0.99, batch_norm_epsilon=1e-3,
                data_format=None):
    '''Mobile Inverted Bottleneck block: optional 1x1 expansion -> depthwise
    conv -> optional squeeze-and-excitation -> linear 1x1 projection, with an
    identity skip plus drop-connect when the shapes allow it. Returns a
    closure to be applied to a tensor.'''
    if data_format is None:
        data_format = K.image_data_format()
    if data_format == 'channels_first':
        chan_dim = 1
        spatial_dims = [2, 3]
    else:
        chan_dim = -1
        spatial_dims = [1, 2]
    # SE is only enabled for a ratio in (0, 1].
    has_se_layer = (se_ratio is not None) and (se_ratio > 0 and se_ratio <= 1)
    filters = input_filters * expand_ratio
    def block(inputs):
        # Expansion: widen channels by expand_ratio (skipped when ratio == 1).
        if expand_ratio != 1:
            x = layers.Conv2D(filters, (1, 1),
                              strides=(1, 1),
                              padding='same',
                              use_bias=False,
                              kernel_initializer=ConvInitializer())(inputs)
            x = layers.BatchNormalization(axis=chan_dim,
                                          momentum=batch_norm_momentum,
                                          epsilon=batch_norm_epsilon)(x)
            x = Swish()(x)
        else:
            x = inputs
        # Depthwise convolution -- the only place `strides` applies.
        x = layers.DepthwiseConv2D(kernel_size,
                                   strides=strides,
                                   padding='same',
                                   depthwise_initializer=ConvInitializer(),
                                   use_bias=False)(x)
        x = layers.BatchNormalization(axis=chan_dim,
                                      momentum=batch_norm_momentum,
                                      epsilon=batch_norm_epsilon)(x)
        x = Swish()(x)
        if has_se_layer:
            x = SEBlock(input_filters, se_ratio, expand_ratio)(x)
        # Projection back down to output_filters (linear -- no activation).
        x = layers.Conv2D(output_filters, (1, 1),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False,
                          kernel_initializer=ConvInitializer())(x)
        x = layers.BatchNormalization(axis=chan_dim,
                                      momentum=batch_norm_momentum,
                                      epsilon=batch_norm_epsilon)(x)
        if id_skip:
            # Residual connection only when stride is 1 and channels match.
            if all(s == 1 for s in strides) and (input_filters == output_filters):
                # Only apply drop connect if the skip is present.
                if drop_connect_rate:
                    x = DropConnect(drop_connect_rate)(x)
                x = layers.Add()([x, inputs])
        return x
    return block
def EfficientNet(input_shape,
                 block_args_list: List[BlockArgs],
                 width_coefficient: float,
                 depth_coefficient: float,
                 include_top=True,
                 weights=None,
                 input_tensor=None,
                 pooling=None,
                 classes=1000,
                 drop_rate=0.,
                 drop_connect_rate=0.,
                 batch_norm_momentum=0.99,
                 batch_norm_epsilon=1e-3,
                 depth_divisor=8,
                 min_depth=None,
                 data_format=None,
                 default_size=None,
                 **kwargs):
    """
    Builder model for EfficientNets
    # Args:
        input_shape: Optional tuple, depends on the configuration,
            Defaults to 224 when None is provided
        block_args_list: Optional list of BlockArgs,
            each of which detail the args of the MBConvBlock.
            If left as None, it defaults to the blocks from the paper
        width_coefficient: Determine # of channels available per layer
        depth_coefficient: Determine # of layers available to the model
        include_top: Whether to include FC layer at the top of the network
        weights: `None` (random initialization) or `imagenet` (imagenet weights)
            or path to pretrained weight
        input_tensor: optional Keras tensor
        pooling: Optional pooling mode for feature extraction
            when `include_top` is False
            - `None`: the output of the model will be 4D tensor output of
                the last convolutional layer
            - `avg`: global average pooling will be applied to the output of
                the last convolutional layer, thus its outputs will be 2D tensor
            - `max`: global max pooling will be applied
        classes: optional # of classes to classify images into,
            only specified if `include_top` is True and `weights` is None
        drop_rate: Float, percentage of dropout
        drop_connect_rate: Float, percentage of random dropped connection
        depth_divisor: Optional. Used when rounding off
            the coefficient scaled channels and depth of the layers
        min_depth: minimum of depth value to avoid blocks with 0 layer
        default_size: default image size of the model
    # Raises:
        `ValueError`: If weights are not in `imagenet` or None
        `ValueError`: If weights are `imagenet` and `classes` is not 1000
    # Returns:
        A Keras model
    """
    if not (weights in ('imagenet', None) or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None`, `imagenet` or `path to pretrained weights`')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as `imagenet` with `include_top` '
                         'as true, `classes` should be 1000')
    if data_format is None:
        data_format = K.image_data_format()
    if data_format == 'channels_first':
        chan_dim = 1
        spatial_axis = [2, 3]
    else:
        chan_dim = -1
        spatial_axis = [1, 2]
    if default_size is None:
        default_size = 224
    if block_args_list is None:
        block_args_list = get_default_block_list()
    # Count the strided stages to derive the minimum feasible input size
    # (each stride-2 stage halves the spatial resolution once).
    stride_count = 1
    for block_args in block_args_list:
        if block_args.strides is not None and block_args.strides[0] > 1:
            stride_count += 1
    min_size = int(2 ** stride_count)
    # Determine proper input shape and default size
    input_shape = _obtain_input_shape(input_shape, default_size, min_size,
                                      data_format, include_top, weights=weights)
    # Stem part
    if input_tensor is None:
        inputs = layers.Input(shape=input_shape)
    else:
        if K.is_keras_tensor(input_tensor):
            inputs = input_tensor
        else:
            inputs = layers.Input(shape=input_shape, tensor=input_tensor)
    x = inputs
    # Stem convolution; channel count is scaled by the width coefficient.
    x = layers.Conv2D(
        round_filters(32, width_coefficient, depth_divisor, min_depth),
        (3, 3),
        strides=(2, 2),
        padding='same',
        kernel_initializer=ConvInitializer(),
        use_bias=False)(x)
    x = layers.BatchNormalization(axis=chan_dim,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)
    x = Swish()(x)
    num_blocks = sum([block_args.num_repeat for block_args in block_args_list])
    # DropConnect rate is scaled linearly with block depth.
    drop_connect_rate_per_block = drop_connect_rate / float(num_blocks)
    # Blocks part
    for block_idx, block_args in enumerate(block_args_list):
        assert block_args.num_repeat > 0, 'Error in # of block'
        # NOTE: the BlockArgs objects in block_args_list are mutated in
        # place here (filters/repeats rescaled by the coefficients).
        block_args.input_filters = round_filters(
            block_args.input_filters,
            width_coefficient,
            depth_divisor,
            min_depth)
        block_args.output_filters = round_filters(
            block_args.output_filters,
            width_coefficient,
            depth_divisor,
            min_depth)
        block_args.num_repeat = round_repeats(block_args.num_repeat, depth_coefficient)
        # The first block of each stage carries the stride and filter change.
        # BUG FIX: batch_norm_momentum/batch_norm_epsilon were previously
        # passed out of order (epsilon was passed twice, once into the
        # momentum slot), breaking BatchNorm statistics.
        x = MBConvBlock(block_args.input_filters, block_args.output_filters,
                        block_args.kernel_size, block_args.strides,
                        block_args.expand_ratio, block_args.se_ratio,
                        block_args.identity_skip, drop_connect_rate_per_block * block_idx,
                        batch_norm_momentum, batch_norm_epsilon, data_format)(x)
        if block_args.num_repeat > 1:
            block_args.input_filters = block_args.output_filters
            block_args.strides = (1, 1)
        for _ in range(block_args.num_repeat - 1):
            # BUG FIX: momentum/epsilon were previously swapped here.
            x = MBConvBlock(block_args.input_filters, block_args.output_filters,
                            block_args.kernel_size, block_args.strides,
                            block_args.expand_ratio, block_args.se_ratio,
                            block_args.identity_skip,
                            drop_connect_rate_per_block * block_idx,
                            batch_norm_momentum, batch_norm_epsilon, data_format)(x)
    # Head part
    x = layers.Conv2D(
        round_filters(1280, width_coefficient, depth_divisor, min_depth),
        (1, 1),
        strides=(1, 1),
        padding='same',
        kernel_initializer=ConvInitializer(),
        use_bias=False
    )(x)
    x = layers.BatchNormalization(axis=chan_dim,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)
    x = Swish()(x)
    if include_top:
        x = layers.GlobalAveragePooling2D(data_format=data_format)(x)
        if drop_rate > 0:
            x = layers.Dropout(drop_rate)(x)
        x = layers.Dense(classes,
                         activation='softmax',
                         kernel_initializer=DenseInitializer())(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)
    outputs = x
    # Ensure that the model takes into account any potential predecessors
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    model = Model(inputs, outputs)
    # Load weights
    if weights == 'imagenet':
        # Pretrained ImageNet weights are only published for B0-B5.
        # TODO: add entries for B6 (528) and B7 (600) once released.
        size_to_name = {224: 'efficientnet-b0', 240: 'efficientnet-b1',
                        260: 'efficientnet-b2', 300: 'efficientnet-b3',
                        380: 'efficientnet-b4', 456: 'efficientnet-b5'}
        if default_size in size_to_name:
            fname = size_to_name[default_size] + ('.h5' if include_top else '_notop.h5')
            weights_path = get_file(
                fname,
                'https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/' + fname,
                cache_subdir='models'
            )
            # BUG FIX: the downloaded weights file was previously fetched
            # but never actually loaded into the model.
            model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
def EfficientNetB0(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.2,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B0: width 1.0, depth 1.0, default input size 224."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=1.0, depth_coefficient=1.0,
                        default_size=224, **common)
def EfficientNetB1(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.2,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B1: width 1.0, depth 1.1, default input size 240."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=1.0, depth_coefficient=1.1,
                        default_size=240, **common)
def EfficientNetB2(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.3,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B2: width 1.1, depth 1.2, default input size 260."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=1.1, depth_coefficient=1.2,
                        default_size=260, **common)
def EfficientNetB3(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.3,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B3: width 1.2, depth 1.4, default input size 300."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=1.2, depth_coefficient=1.4,
                        default_size=300, **common)
def EfficientNetB4(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.4,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B4: width 1.4, depth 1.8, default input size 380."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=1.4, depth_coefficient=1.8,
                        default_size=380, **common)
def EfficientNetB5(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.4,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B5: width 1.6, depth 2.2, default input size 456."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=1.6, depth_coefficient=2.2,
                        default_size=456, **common)
def EfficientNetB6(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.5,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B6: width 1.8, depth 2.6, default input size 528."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=1.8, depth_coefficient=2.6,
                        default_size=528, **common)
def EfficientNetB7(input_shape=None,
                   include_top=True,
                   weights='imagenet',
                   input_tensor=None,
                   pooling=None,
                   classes=1000,
                   drop_rate=0.5,
                   drop_connect_rate=0.,
                   data_format=None):
    """EfficientNet-B7: width 2.0, depth 3.1, default input size 600."""
    common = dict(include_top=include_top,
                  weights=weights,
                  input_tensor=input_tensor,
                  pooling=pooling,
                  classes=classes,
                  drop_rate=drop_rate,
                  drop_connect_rate=drop_connect_rate,
                  data_format=data_format)
    return EfficientNet(input_shape, get_default_block_list(),
                        width_coefficient=2.0, depth_coefficient=3.1,
                        default_size=600, **common)
if __name__ == '__main__':
    # Smoke test: build EfficientNet-B0 with its classifier head and print the architecture.
    net = EfficientNetB0(include_top=True)
    net.summary()
from flask import Blueprint

# Blueprint for the classification endpoints; every route registered on it
# is served under the '/classification' URL prefix.
types = Blueprint('types', __name__, url_prefix='/classification')

# never forget
# NOTE: routes is imported at the bottom, after `types` exists, so that
# routes.py can import this blueprint without a circular-import error.
from . import routes
#- Functions Python provides out of the box ----- built-in functions
# Comparison functions
# max() - largest value
print(max(3,7,-1,5,4))
# min() - smallest value
print(min(3,7,-1,5,4))
# Arithmetic functions
# sum() - total of the items in an iterable
print(sum([1,2,3,4,5,6,7,8,9,10]))
# pow(base, exp) - exponentiation (the original comment said "square root",
# but pow(2, 4) computes 2**4)
print(pow(2,4))
# divmod() - returns (quotient, remainder) as a tuple
print(divmod(10,3))
# Base-conversion functions
# bin() - to binary
# oct() - to octal (original comment mistakenly named this "act()")
# hex() - to hexadecimal
# round() - rounds to the given number of decimal places
print(round(123.456,1))
# abs() - absolute value
print(abs(-5));
print(abs(5));
# ASCII-art cats drawn with escaped backslashes and embedded newlines
print("\\ /\\\n ) ( ') \n( / ) \n \\(__)|")
print()
print('|\\_/|\n|q p| /}\n( 0 )"""\\\n|"^"` |\n||_/=\\\\__|\n')
# A "mind reading" number trick: for any number X the player thinks of,
# ((X + 5) * 2 - 4) / 2 - X algebraically simplifies to 3, so the script
# can announce the player's result without ever knowing X.
print("let's play")
print("Think a number")
print("Don't tell me please")
print("Let's call this number A")
print("Now, add 5")
print("multiply the result by 2")
print("to what was left subtract 4")
print("divide the result by 2")
print("subtract the number you thought first")
X=2  # placeholder value; the expression below is independent of X
A=(((((X+5)*2)-4))/2)-X  # always evaluates to 3.0
print("is your answer ?",A)
b=input("Do you wanna play again?")
if b=="Yes":
    print("Ok, let's go")
else:
    print("You are such a pussy")
    print("fat cat!")
# NOTE(review): the second round below runs unconditionally, even when the
# player answered something other than "Yes" — confirm this is intended.
print("let's play")
print("Think a number")
print("Don't tell me please")
print("Let's call this number B")
print("Now, add 1")
print("multiply the result by 2")
print("add 7")
print("divide the result by 2")
print("subtract the number you thought first(B)")
X=2  # placeholder again; result is independent of X
B=(((((X+1)*2)+7))/2)-X  # (2X + 9) / 2 - X == 4.5 for every X
print("is your answer ?",B)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/20 10:55 上午
# @Author : Alex
# @Site :
# @File : preprocess.py
# @Software: PyCharm
import pickle
from util.seeg_utils import *
def read_data():
    """Iterate over insomnia EDF recordings 1-9, printing each file's
    annotated start time and sampling rate."""
    for recording_idx in range(1, 10):
        edf_path = '../dataset/insomnia/ins{}.edf'.format(recording_idx)
        raw = read_edf_raw(edf_path)
        print("Start time:" + raw.annotations.orig_time.strftime("%H:%M:%S"))
        print(get_sampling_hz(raw))
def read_st():
    """Load the pickled '.st' side-car file for recording ins2 and report success.

    Bug fix: pickle streams are binary, so the file must be opened in 'rb'
    mode — text mode ('r') raises a TypeError/UnicodeDecodeError on
    Python 3. A with-block also guarantees the handle is closed.
    """
    path = '../dataset/insomnia/ins2.edf.st'
    with open(path, 'rb') as fh:
        data = pickle.load(fh)
    print("yes")
# Module entry point: dump metadata for every insomnia recording on import/run.
read_data()
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:Segment.py
# @Author: Michael.liu
# @Date:2019/2/12
# @Desc:
'''
This package builds a search engine from first principles. The engine is
split into several parts, following the structure described in Chapter 2,
Figure 2.1 of "Search and Recommendation: Introduction, Practice and
Advanced Topics".
'''
# (1) Web crawler        ---- news_spider.py
# (2) Index construction ---- build_index.py
# (3) Retrieval model    ---- search_engine.py
# (4) Relevance scoring  ---- learn_related.py
'''
Created on Dec 04, 2015
@author: Urjit Patel - up276
'''
from unittest import TestCase
import assignment10
import os
import glob
__author__ = 'up276'
class Test(TestCase):
    '''
    Four test cases covering assignment10's data cleaning, grade-sequence
    comparison, per-restaurant grading, and output-file generation.
    '''

    # Cleaning must remove all NaNs and the placeholder grades P/Z/'Not Yet Graded'.
    def test_cleaning_function(self):
        frame = assignment10.load_data().ix[:30000]
        cleaned = assignment10.clean_data(frame)
        for column in cleaned.columns.values:
            self.assertEqual(cleaned[column].isnull().sum(), 0)
        self.assertEqual(cleaned.GRADE.isin(['P', 'Z', 'Not Yet Graded']).sum(), 0)

    # test_grades: 0 for flat, 1 for improving, -1 for worsening sequences.
    def testing_for_test_grades_function(self):
        expectations = [(['A', 'A', 'A'], 0),
                        (['C', 'B', 'A'], 1),
                        (['A', 'B', 'C'], -1)]
        for grades, expected in expectations:
            self.assertEqual(assignment10.test_grades(grades), expected)

    # test_restaurant_grades for a known restaurant id; only the first 30000
    # rows are loaded to keep the test fast.
    def testing_for_test_restaurant_grades_function(self):
        frame = assignment10.load_data().ix[:30000]
        cleaned = assignment10.clean_data(frame)
        self.assertEqual(assignment10.test_restaurant_grades(40358429, cleaned), 1)

    # End-to-end: all expected PDF plots must be (re)created. Only the first
    # 30000 rows are loaded to keep the test fast.
    def test_existance_of_generated_files(self):
        try:
            for stale in glob.glob('grade_improvement*.pdf'):
                os.remove(stale)  # clear leftovers from previous runs
        except IOError:
            pass
        frame = assignment10.load_data().ix[:30000]
        cleaned = assignment10.clean_data(frame)
        citywide_counts = assignment10.GradeCount(cleaned)
        assignment10.PlotGraphs.Plot(citywide_counts, 'grade_improvement_nyc.pdf')
        borough_counts = assignment10.BoroughsGradeCount(cleaned)
        assignment10.PlotGraphs.PlotBoroughsGraphs(borough_counts)
        for suffix in ('nyc', 'bronx', 'brooklyn', 'manhattan', 'queens', 'statn'):
            self.assertTrue(os.path.isfile('./grade_improvement_%s.pdf' % suffix))
|
#!/usr/bin/env python
#RI_PI
import time
import serial
import RPi.GPIO as GPIO
import time
import sys
import os
import thread as checkerThread #Safety checks during movement
import thread as timeThread #thread that checks if rotations are being counted
import thread as batteryThread
import RI_config as config
# --- Module state and hardware pin assignments (BOARD pin numbering) ---
currentFile=1  # which of the two position-storage files (1 or 2) is written next
timeSinceLast=0 # time since last rotation
maxSpeed=100  # PWM duty cycle (%) used for full-speed movement
motorA=12 #motors (PWM pins; pA pulls rings apart, pB compresses)
motorB=10
hallA=8 #Inturrupt (hall-effect sensors counting motor rotations)
hallB=11
currentPin=7 #pulse current to keep battery active
distChange=0  # signed mm added to absDist per hall pulse; sign encodes direction
dRot=0.4 #How far the ring moves with each activation of a hall effect sensor
GPIO.setwarnings(False)
# Bit-banged SPI pins for the MCP3008 ADC (read by readadc())
SPICLK = 13
SPIMISO = 15
SPIMOSI = 16
SPICS = 18
##### change with RI_num, max and min measured between moving part and far end, NOT between rings
maxDist=40  # mm, upper travel limit
minDist=2   # mm, lower travel limit
ringOffset=17  # mm added when reporting ring-to-ring distance over serial
#####
forceSetpoint = 0
RI_num = 7 #determines properties of the implant (maxDist, etc.)
terminator="\n" #terminator for serial input line
moving=False       # True while a movement thread is active
motorActive=True   # safety latch: False disables all motor commands
led=19
def calibrate(calibrationDist): # Give RPi absolute distance by giving starting point and calculating relative distance from there
    """Anchor the absolute ring distance to an externally measured value and persist it."""
    global absDist
    absDist = calibrationDist
    updateFile()
def milToDigi(posnInMil): #conversion to digital (0-1023)
    """Map a position in mm onto the ADC's 0-1023 integer scale."""
    fraction = float(posnInMil) / (maxDist - minDist)
    return int(round(1023 * fraction))
def digiToMil(posDigi): #conversion to mm
    """Map a 0-1023 ADC reading back onto a position in mm.

    Bug fix: the original computed float(posDigi/1023). This script runs on
    Python 2 (it imports the py2-only `thread` module), where posDigi/1023
    is integer division and equals 0 for every reading below 1023, so the
    function always returned 0.0. Converting the numerator before dividing
    restores the intended fraction, mirroring milToDigi().
    """
    conversionConst = float(posDigi) / 1023
    milDist = (maxDist - minDist) * conversionConst
    return milDist
def safetyCheck(posMil): #Check boundaries at the ends of the implant
    """Stop and disable the motor if the rings have passed either travel limit."""
    global motorActive
    if posMil < minDist:
        motorStop()
        writeToSerial("ATTN: Rings too close together. Motors stopped.\n")
        motorActive=False
        return
    if posMil > maxDist:
        motorStop()
        writeToSerial("ATTN: Rings too far apart. Motors stopped.\n")
        motorActive=False
def updateFile():
    """Persist absDist (mm, current distance between clamps) plus a
    millisecond timestamp so the position survives power failure; the
    target alternates between two files so one intact copy always exists."""
    target = "/home/pi/RI_Implant/positionStorage" + str(currentFile) + ".txt"
    with open(target, "w") as storeFile:
        storeFile.write(str(absDist) + "\n" + str(float(time.time()*1000)))
    changeCurrentFile()
def moveToPosn(goalMil):#Start new threads for moving and safety checks
    """Asynchronously move to goalMil (mm): launch the mover/safety thread,
    wait 50 ms so it can set the `moving` flag, then start the rotation
    watchdog thread (which exits immediately if `moving` is not yet set)."""
    checkerThread.start_new_thread(checkGoal, (goalMil,))
    time.sleep(0.05)
    timeThread.start_new_thread(checkTime,())
def checkTime():
    """Watchdog thread: while a move is in progress, stop the motor if no
    hall-sensor rotation has been detected for `safetyTime` ms (lastTime is
    refreshed by the magnetDetected interrupt)."""
    global lastTime
    global motorActive
    safetyTime=4000  # ms without a rotation before aborting (comment below said 2 s; the constant is 4 s)
    lastTime=float(time.time()*1000)
    while moving==True and motorActive==True:
        currentTime=float(time.time()*1000)
        if float(currentTime-lastTime)>safetyTime: #If moving and gone too long without detecting a rotation, stop motor
            motorStop()
            motorActive=False
            writeToSerial("ATTN: Motor rotations not detected. Movement Stopped.\n")
            timeThread.exit()
        time.sleep(0.1)
    if motorActive==False:
        writeToSerial("ATTN: Motor not enabled.\n")
        timeThread.exit()
def checkGoal(goalMil): # Move to a position while performing safety checks
    """Mover thread: drive the rings toward goalMil (mm), polling absDist
    (updated by the hall-sensor interrupt) and running boundary checks.
    If a boundary check disabled the motor but the goal lies back inside
    the safe range, re-enable and move out of the danger zone."""
    global moving
    global motorActive
    moving=True
    try:
        if goalMil > absDist:
            pull()
            while goalMil > absDist and motorActive==True:
                safetyCheck(absDist)
                time.sleep(0.1)
        else:
            compress()
            # NOTE(review): unlike the pull branch, this loop has no sleep,
            # so it busy-polls at full speed — confirm whether intended.
            while goalMil < absDist and motorActive==True:
                safetyCheck(absDist)
        if motorActive==False and absDist<minDist and goalMil>minDist: #allow movement out of danger zones
            motorActive=True
            pull()
            writeToSerial("ATTN: Motor enabled to move away from the end.\n")
            while goalMil > absDist and motorActive==True:
                time.sleep(0.01)
        elif motorActive==False and absDist>maxDist and goalMil<maxDist:
            motorActive=True
            compress()
            writeToSerial("ATTN: Motor enabled to move away from the end.\n")
            while goalMil < absDist and motorActive==True:
                time.sleep(0.01)
    except:
        # Bare except: any error during movement fails safe by stopping
        # and disabling the motor rather than leaving it running.
        motorStop()
        motorActive=False
        writeToSerial("ATTN: Error. Motor stopped.\n")
    motorStop()
    moving=False
    checkerThread.exit()
def writeToSerial(text):
    """Send `text` to the laptop over the Bluetooth serial link, discarding
    any pending unsent output first so stale data is not interleaved."""
    ser.flushOutput()
    ser.write(text)
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    """Bit-bang one SPI transaction against the MCP3008 ADC and return the
    10-bit reading (0-1023) for channel `adcnum`, or -1 for a bad channel.
    The exact GPIO toggle order below IS the SPI protocol — do not reorder."""
    if ((adcnum > 7) or (adcnum < 0)):
        return -1
    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)     # bring CS low to begin the transaction
    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3  # we only need to send 5 bits here
    # Clock out the 5 command bits, MSB first.
    for i in range(5):
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
    adcout = 0
    # read in one empty bit, one null bit and 10 ADC bits
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1
    GPIO.output(cspin, True)  # release CS to end the transaction
    adcout >>= 1  # first bit is 'null' so drop it
    return adcout
def updateDistance():
    """Advance the absolute ring distance by one hall-pulse worth of travel.
    distChange is signed and set by pull()/compress()/slow(), so the same
    accumulator tracks motion in both directions."""
    global absDist
    absDist = float(absDist) + float(distChange)
def magnetDetected(channel):
    #inturrupt function to detect hall effect activation
    """GPIO rising-edge callback for hall sensor A: advance and persist the
    tracked distance, and refresh the watchdog's lastTime timestamp."""
    if moving==True:
        updateDistance() #only update distance if moving - prevents accidental double triggers when the implant is stopped or a fluxuation in voltage occurs
        updateFile()
    global lastTime
    lastTime=float(time.time()*1000)
def magnetDetected2(channel):
    """GPIO rising-edge callback for hall sensor B (currently not registered
    in initPins(); kept as a duplicate of magnetDetected)."""
    if moving==True:
        updateDistance() #only update distance if moving - prevents accidental double triggers when the implant is stopped or a fluxuation in voltage occurs
        updateFile()
    global lastTime
    lastTime=float(time.time()*1000)
# Pull rings apart?
def pull():
    """Drive the rings apart at full speed: set the per-pulse distance delta
    positive, run motor A at maxSpeed duty and hold motor B at 0."""
    if motorActive==True:
        global distChange
        distChange=dRot
        pA.start(maxSpeed)
        pB.start(0)
    else:
        motorStop()
# Pushes rings together
def compress():
    """Drive the rings together at full speed: set the per-pulse distance
    delta negative, run motor B at maxSpeed duty and hold motor A at 0."""
    if motorActive==True:
        global distChange
        distChange=-dRot
        pB.start(maxSpeed)
        pA.start(0)
    else:
        motorStop()
def slow(direction, spd): #move the rings at a % of maxSpeed
    ## 0=compress, 1=pull
    """Move the rings at spd (fraction of maxSpeed) in the given direction.
    NOTE(review): unlike pull()/compress(), distChange is set to +/-1 here
    rather than +/-dRot (0.4), which would make the hall-pulse distance
    tracking inconsistent during slow moves — confirm intended.
    NOTE(review): this function also skips the motorActive safety guard
    that pull()/compress() enforce."""
    global distChange
    if direction == 0:
        distChange=-1
        pA.start(maxSpeed*spd)
        pB.start(0)
    else:
        distChange=1
        pB.start(maxSpeed*spd)
        pA.start(0)
def getIP():
    """Report the Pi's wlan0 IP address over serial by shelling out to
    ifconfig/grep and relaying the first matching line."""
    os.system("sudo ifconfig | grep inet.*Bcast.*Mask > /home/pi/RI_Implant/wlan0.txt")
    f=open("/home/pi/RI_Implant/wlan0.txt", "r")
    writeToSerial("ATTN: "+f.readline().strip(' ')+"\n")
def motorStop(): # Stop motors
    """Stop both motor PWM channels immediately."""
    pA.stop()
    pB.stop()
def readForce(): # convert analog input from force sensor into digital input for RPi
    """Sample the force sensor on ADC channel 0 and return the raw 10-bit reading."""
    FORCE_CHANNEL = 0
    return readadc(FORCE_CHANNEL, SPICLK, SPIMOSI, SPIMISO, SPICS)
def sendSerialData(): #Output serial data to laptop
    """Emit one fixed-format "SNSR[...]" telemetry line: timestamp (ms since
    start), position (mm incl. ringOffset), raw force, position setpoint,
    force setpoint, motor speed, implant number, and voltage."""
    global positionSetpoint
    forceValue = readForce()
    positionValue = float(absDist) + float(ringOffset)
    voltage = 5000  # hard-coded placeholder; not measured — presumably mV, TODO confirm
    currentTime=time.time()
    timeStamp =(currentTime-startTime)*1000
    pSet=float(positionSetpoint)+float(ringOffset) #setpoint
    mSpeed=0  # motor speed is not measured; always reported as 0
    outputStr=("SNSR[%010lu] %4f %4i %4f %4i %4i %4i %4i\n" % (timeStamp, float(positionValue), int(forceValue), float(pSet), int(forceSetpoint), int(mSpeed), RI_num, int(voltage)))
    print (outputStr)
    writeToSerial(outputStr)
def initPins():# Setup GPIO in board mode
    """Configure all GPIO pins (BOARD numbering): SPI lines for the ADC,
    motor/battery outputs, hall-sensor inputs with pull-downs plus the
    rising-edge interrupt, and the two motor PWM channels (100 Hz)."""
    GPIO.setmode (GPIO.BOARD)
    # set up the SPI interface pins
    GPIO.setup(SPIMOSI, GPIO.OUT)
    GPIO.setup(SPIMISO, GPIO.IN)
    GPIO.setup(SPICLK, GPIO.OUT)
    GPIO.setup(SPICS, GPIO.OUT)
    GPIO.setup(motorA, GPIO.OUT, initial=0)
    GPIO.setup(motorB, GPIO.OUT, initial=0)
    GPIO.setup(currentPin, GPIO.OUT, initial=0)
    GPIO.setup(hallA, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    GPIO.setup(hallB, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    # 600 ms debounce suppresses double triggers from one magnet pass.
    GPIO.add_event_detect(hallA, GPIO.RISING, callback=magnetDetected, bouncetime=600)
    #GPIO.add_event_detect(hallB, GPIO.RISING, callback=magnetDetected2, bouncetime=600)
    global pA
    pA=GPIO.PWM(motorA, 100)
    global pB
    pB=GPIO.PWM(motorB, 100)
def reconnect():
    """Recover from a lost serial link: persist the current position, close
    the port, and re-exec this script in-place with the same arguments."""
    updateFile()
    python=sys.executable
    ser.close()
    os.execl(python, python, * sys.argv)
def checkSerialInput(): # Parse serial inputs
    """Read one newline-terminated command from serial and dispatch it.

    Commands:
      g       handshake - reply with one telemetry line
      P<mm>   set the absolute position setpoint (mm, including ringOffset)
      I<n>    select implant number n and load its geometry from RI_config
      MSTRT   re-enable the motor and resume moving toward the setpoint
      MSTOP   stop and disable the motor
      C<mm>   calibrate: set the current absolute distance to <mm>
      D       report the Pi's wlan0 IP address
      F       report the current geometry constants
    Anything else (including an empty command) is echoed back as
    unrecognized.
    """
    global motorActive
    global RI_num
    global maxDist
    global minDist
    global ringOffset
    flag=False
    cmd=""
    while flag==False: #read until the terminator symbol, \n, is detected
        try:
            c=ser.read(1)
            if c==terminator:
                flag=True
            else:
                cmd+=c
        except:
            # Serial link lost: fail safe, then re-exec the script.
            motorStop()
            motorActive=False
            reconnect()
    # BUG FIX: the original dispatched on cmd[0], which raised an uncaught
    # IndexError (crashing the main loop) whenever a bare newline produced
    # an empty command. str.startswith() is simply False on an empty
    # string, so empty input now falls through to the "not recognized"
    # reply; behavior for non-empty commands is unchanged.
    if cmd=="g": #handshake between laptop and Rpi
        time.sleep(0.05)
        sendSerialData()
    elif cmd.startswith("P"):
        try:
            dGoal=float(cmd[1:])
            global positionSetpoint
            if dGoal-ringOffset < minDist or dGoal-ringOffset > maxDist:
                writeToSerial("ATTN: Command rejected. Position setpoint too close to an end.\n")
            else:
                if abs(dGoal-ringOffset-absDist)>=dRot:
                    motorStop()
                    writeToSerial("ATTN: Changing setpoint to "+str(dGoal)+ ".\n")
                    positionSetpoint=dGoal-ringOffset
                    moveToPosn(positionSetpoint)
                else:
                    # Requested move is smaller than one hall pulse (dRot).
                    writeToSerial("ATTN: Position change below sensor resolution.\n")
        except:
            writeToSerial("ATTN: Command formatted incorrectly.\n")
    elif cmd.startswith("I"):
        try:
            RI_num=int(cmd[1:])
            (maxDist, minDist, ringOffset) = config.riConfig(RI_num)
            writeToSerial("ATTN: Changing Implant # to " + str(RI_num) +".\n")
        except:
            writeToSerial("ATTN: Command formatted incorrectly.\n")
    elif cmd=="MSTRT":
        writeToSerial("ATTN: Motor activated.\n")
        motorActive=True
        if abs(absDist-positionSetpoint)>=dRot:
            moveToPosn(positionSetpoint)
    elif cmd=="MSTOP":
        motorStop()
        writeToSerial("ATTN: Motor stopped.\n")
        motorActive=False
    elif cmd.startswith("C"):
        try:
            calibrationDist=float(cmd[1:])
            if calibrationDist < minDist or calibrationDist > maxDist:
                writeToSerial("ATTN: Calibration distance too close to an end. Did you make a mistake in measuring?\n")
            else:
                calibrate(calibrationDist)
                writeToSerial("ATTN: Calibrated.\n")
        except:
            writeToSerial("ATTN: Command formatted incorrectly.\n")
    elif cmd.startswith("D"):
        try:
            getIP()
        except:
            writeToSerial("ATTN: Error retrieving connection data.\n")
    elif cmd.startswith("F"):
        writeToSerial("ATTN: minDist: "+str(minDist)+", maxDist: "+str(maxDist)+", ringOffset: "+str(ringOffset)+".\n")
    else:
        writeToSerial('ATTN: Command not recognized. Received @' + str(cmd) + '@\n')
    time.sleep(0.05)
def keepBatteryAwake(): # pulse over a resistor every 1.4s to keep the battery awake
    """Pulse currentPin high for 40 ms every `interval` ms so the battery
    pack sees enough load to stay switched on.

    Bug fix: after a pulse the original reset lastPulse to time.time()
    (seconds) while the elapsed-time comparison uses time.time()*1000
    (milliseconds), so from the second iteration on the condition was
    always true and the pin pulsed continuously instead of every 1.4 s.
    """
    interval=1400  # ms between pulses
    lastPulse=time.time()*1000
    while True:
        if time.time()*1000-lastPulse>interval:
            GPIO.output(currentPin, 1)
            time.sleep(0.04)
            GPIO.output(currentPin, 0)
            lastPulse=time.time()*1000  # keep units in ms (was seconds)
def changeCurrentFile():
    """Toggle the position-storage target between files 1 and 2; any other
    value is left unchanged, exactly as the original if/elif did."""
    global currentFile
    currentFile = {1: 2, 2: 1}.get(currentFile, currentFile)
# --- Startup sequence: configure hardware, wait for the Bluetooth serial
# link, recover the last persisted position, then serve commands forever. ---
initPins()
#batteryThread.start_new_thread(keepBatteryAwake, ())
serialFlag=False
while serialFlag==False: #Don't start until connection is aquired
    try:
        ser=serial.Serial('/dev/rfcomm0')
        serialFlag=True
        print ("Connected")
        ser.flushInput()
        ser.flushOutput()
    except :
        print ("Could not connect to rfcomm0")
        time.sleep(3)
ser.timeout=1
ser.baudrate=115200
startTime=time.time()
# Recover the persisted position: read both alternating storage files and
# keep the value with the newer timestamp (one file may be corrupt if power
# failed mid-write).
tsA=0
tsB=0
absA=0
absB=0
try:
    storeFile=open("/home/pi/RI_Implant/positionStorage"+str(currentFile)+".txt", "r") #File for position storage to give long term memory in case of power failure
    absA=float(storeFile.readline())
    tsA=float(storeFile.readline())
    storeFile.close()
except:
    print("Error reading /home/pi/RI_Implant/positionStorage"+str(currentFile)+"\n")
changeCurrentFile()
try:
    storeFile=open("/home/pi/RI_Implant/positionStorage"+str(currentFile)+".txt", "r") #File for position storage to give long term memory in case of power failure
    absB=float(storeFile.readline())
    tsB=float(storeFile.readline())
    storeFile.close()
except:
    print("Error reading /home/pi/RI_Implant/positionStorage"+str(currentFile)+"\n")
if tsA>tsB:
    absDist=absA
else:
    absDist=absB
positionSetpoint=absDist
(maxDist, minDist, ringOffset) = config.riConfig(RI_num)
# Main loop: block on one serial command at a time.
while True:
    checkSerialInput()
# NOTE(review): unreachable — the loop above never exits, so GPIO pins are
# never cleaned up on shutdown.
GPIO.cleanup()
|
# coding=utf8
import gensim
import pickle
from gensim.models import Word2Vec,LdaModel,KeyedVectors
from gensim.models.callbacks import CallbackAny2Vec
import jieba
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.distance import cosine
import re
from collections import Counter
import math
from sen2vec import sentence_to_vec
import os
import warnings
import networkx as nx
warnings.filterwarnings("ignore")
# Split a document into sentences while restoring the punctuation removed by the split.
def split_sentence(sentence):
    """Split `sentence` on Chinese/ASCII terminators (。??!! and CR/LF),
    re-attach each piece's trailing terminator, drop fragments of three
    characters or fewer, then keep only pieces that carry sentence-ending
    punctuation (filters bare subtitles) and are not duplicates (the first
    occurrence wins)."""
    pieces = re.split('[。??!!\r\n]', sentence)
    with_terminator = []
    consumed = 0  # characters of sentence covered by pieces so far
    for idx, piece in enumerate(pieces):
        consumed += len(piece)
        # Each earlier split consumed exactly one delimiter character, so
        # this piece's delimiter sits at index consumed + idx.
        cut = consumed + idx
        if cut < len(sentence):
            with_terminator.append(piece + sentence[cut])
    candidates = [p for p in with_terminator if len(p) > 3]
    kept = []
    while candidates:
        tail = candidates.pop()
        is_duplicate = any(tail.strip() == other.strip() for other in candidates)
        if is_duplicate or not re.search('[。??!!]', tail):
            continue
        kept.insert(0, tail)
    return kept
# Score the words of a sentence by TF-IDF against a precomputed IDF table.
def get_tfidfs(sentece,word_idf,stop_words,threshhold):
    """Tokenize with jieba, drop stop words and non-word tokens, and return
    the top-`threshhold` (word, tf*idf) pairs in descending score order.
    Words missing from `word_idf` fall back to their raw term frequency."""
    tokens = [w for w in jieba.cut(sentece)
              if re.findall('\w+', w) and w not in stop_words]
    counts = Counter(tokens)
    scored = []
    for word, freq in counts.most_common():
        tf = freq / len(tokens)
        if word in word_idf:
            scored.append((word, tf * word_idf[word]))
        else:
            scored.append((word, tf))
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:threshhold]
# get textrank result
def get_textrank(sens_embedding, sens,tfidfs,para_title, para_keyword,para_fisrtsen):
    """Rank sentences with a PageRank over a fully connected sentence graph.

    Edge weight = cosine *distance* between sentence embeddings, shrunk by
    the number of shared document keywords (para_keyword per common word);
    edges incident to sentence 0 (the title) are further scaled by
    para_title, and the PageRank of sentence 1 is scaled by para_fisrtsen.
    Returns ( [((index, rank), sentence, embedding), ...] sorted ascending
    by rank, the graph G ).
    NOTE(review): weights are distances (scipy cosine = 1 - similarity) and
    the result is sorted ascending — confirm the intended ranking direction.
    """
    # keyword overlap: per-sentence list of document keywords it contains
    sens_keywords = {}
    key_words = [a for a,b in tfidfs]
    for index, sen in enumerate(sens):
        words = list(jieba.cut(sen))
        sens_keyword = [w for w in words if w in key_words]
        sens_keywords[index] = sens_keyword
    # create graph over every unordered sentence pair
    G= nx.Graph()
    edges = []
    for i , v1 in enumerate(sens_embedding):
        for j, v2 in enumerate(sens_embedding[i+1 :]):
            com_keyword_num = len(set(sens_keywords[i]) & set(sens_keywords[i+j+1]))
            # we decrease cosin distance between sens based on commmon key word number
            score = cosine(v1,v2)*(1- com_keyword_num*para_keyword)
            if i ==0:
                score = score * para_title # weight for relation with title
                edges.append((i,j+i+1,score))
            else:
                edges.append((i,j+i+1,score))
    G.add_weighted_edges_from(edges)
    # pagerank
    page_rank = nx.pagerank(G,weight='weight')
    # weight first sentense (index 1 = first body sentence after the title)
    page_rank[1] = page_rank[1] * para_fisrtsen
    # sorted based on ranking values (ascending)
    result = sorted(zip(page_rank.items(),sens,sens_embedding),key=lambda x: x[0][1])
    return result, G
def autosummation(title, doc, model, word_sifs, stop_words, word_idf):
    """Summarise ``doc`` by extracting its four highest-ranked sentences.

    :param title: document title (becomes node 0 of the TextRank graph)
    :param doc: document body text
    :param model: word2vec keyed vectors used for sentence embeddings
    :param word_sifs: SIF word weights
    :param stop_words: words to ignore during keyword extraction
    :param word_idf: word -> inverse document frequency mapping
    :return: the selected sentences joined in original document order
    """
    # Embed the title together with the document sentences.
    sentences = split_sentence(title + '。' + doc)
    embeddings, cleaned = sentence_to_vec(sentences, 100, model, word_sifs, stop_words)
    # Document-level keywords via tf-idf.
    keywords = get_tfidfs(doc, word_idf, stop_words, 10)
    # Rank sentences; the graph itself is not needed here.
    ranked, _graph = get_textrank(embeddings, cleaned, keywords, 0.5, 0.05, 0.8)
    # Keep the four best sentences, restored to document order.
    best = sorted(ranked[:4], key=lambda item: item[0][0])
    return ''.join(sentence for _, sentence, _ in best)
if __name__ == '__main__':
    # Load the word2vec model and the pre-computed statistics required by
    # autosummation().  These loads were previously commented out, which
    # left ``model``, ``word_sifs``, ``stop_words`` and ``word_idf``
    # undefined and crashed the demo with a NameError.
    # NOTE(review): assumes ``os``, ``pickle`` and ``KeyedVectors`` are
    # imported at the top of this module (the original commented-out code
    # used them unqualified) -- confirm.
    model_path = os.path.join(os.path.abspath('./'), 'word2vector_Model', 'word2vec.kv')
    model = KeyedVectors.load(model_path, mmap='r')
    # load sif weights (context managers close the files; the original
    # commented code leaked the file handles)
    with open(os.path.join('data', 'word_sifs.plk'), 'rb') as fp:
        word_sifs = pickle.load(fp)
    # load stopwords
    with open(os.path.join('data', 'stop_words.plk'), 'rb') as fp:
        stop_words = pickle.load(fp)
    # load idf
    with open(os.path.join('data', 'word_idf.plk'), 'rb') as fp:
        word_idf = pickle.load(fp)
    # test
title= '''中美联合研究:3月1日美国或已有9484例新冠感染'''
doc = '''
美国现在有多少新冠肺炎患者?截至美国东部时间3月9日19时,美国有线电视新闻网给出的累计确诊数字为至少704。
然而,一项基于武汉直飞数据的研究评估认为,在3月1日,美国仅从武汉直接输入的病例就造成了1043至9484例感染。
3月6日,一个中美联合研究团队在预印本网站medRxiv上合作发表了题为《评估COVID-19在美流行规模:基于中国武汉直飞航线的模拟》(Estimating the scale of COVID-19 Epidemic in the United States: Simulations Based on Air Traffic Directly from Wuhan, China )的论文,尚未经过同行评议。
论文的通讯作者来自位于美国洛杉矶的西达赛奈(Cedars-Sinai)医疗中心。北京大学流行病与卫生统计系教授李立明、曹卫华和吕筠参与研究。
研究团队声明,分析有意采取了一系列保守性假设,再加上模型简化,可能会出现偏差,需读者理性判断。
例如,该研究只考虑武汉在封锁(1月23日)前直飞美国的人群,未计算从中国其余地区或其他国家(如韩国、意大利或伊朗)输入的病例。美国在3月1日前采取的相关监测和隔离措施也考虑进去了。
此外,研究假设这些武汉输入的病例在美诊断后就停止传播病毒。
武汉天河机场目前有两条直飞美国的航线,目的地分别是旧金山和纽约,鉴于相应期限的航空数据尚未更新,研究参考了以往可类比的数据。
截至论文写作的2月29日,美国公开报告了20个病例的信息,其中8例在发病前到过武汉,1例有过北京旅行史,4例未报告旅行信息,2例为人传人,5例未有中国旅行史或确诊病例接触史。
尽管美国先行采取了许多遏制措施,包括旅行警告、旅行禁令、入境筛查、接触者追踪等,但仍有多个病例未报告相关的旅行史或接触史,表明社区传播的可能性。论文假设美国疾控中心确定了50%以上的输入性病例。
至于建立新冠病毒的传播范围模型所需的其他一些关键因素,如基本传染数、潜伏期、人际传播时间间隔等,则参照了针对中国病例的现有研究,分别设置为2.1至2.5、6天和7.5天。
在最可能的参数设定下,分析模型显示截至3月1日,若此前采取的措施并未成功减少未确诊病例的传播,美国有9484例感染(90%置信区间,2054到24241);若措施降低了25%的为确诊病例传播,则感染数字为1043(90%置信区间,107到2474)。
论文表示,在对疾病传播“过度保守的设定”和对美国疾控措施“过度乐观的假设”下,模型依然显示3月1日美国出现千名传染病例。研究团队估计,真实的数字可能介于1000至10000之间。这暗示着,在早期流行阶段就控制COVID-19的机会窗口正在关闭。
论文也引述了一份Bedford实验室在3月2日发表的评估,即新冠病毒已经在西雅图地区社区传播了6周,该地区的感染人数应达到了570例。根据论文的模型,像西雅图这样的社区聚集传播不止一处。
鉴于减少25%的传播,就能将模型评估的感染规模降低至近10%,论文作者建议采取积极的遏制手段,如大规模筛查、减少大规模聚集等。
最后,论文提到,由于新冠病毒在非亚洲群体中的传播动态范围几乎没有参考数据,只能基于中国的流行情况判断,因此也存在高估美国感染人数的可能性。毕竟,传播指数与社会经济、文化、环境因素都有关联。
'''
split_sentence(doc)
# summation output
result = autosummation(title,doc,model,word_sifs,stop_words,word_idf)
print(result)
|
#!/usr/bin/env python3
class Command:
    """Simple value object pairing a target module with a message payload."""

    def __init__(self, module, message):
        # module: identifier of the module this command is addressed to
        # message: the payload to deliver
        self.module = module
        self.message = message

    def __repr__(self):
        # Debug-friendly representation; purely additive for callers.
        return "%s(module=%r, message=%r)" % (
            type(self).__name__, self.module, self.message)
#!/usr/bin/env python
from setuptools import setup, find_packages

# Packaging definition for "rearr" -- a console tool that rearranges code
# inside Python modules.
setup(
    name="rearr",
    version="0.2.0",
    description="Rearrange code inside Python modules",
    author="Michel Albert",
    author_email="michel@albert.lu",
    url="http://github.com/exhuma/rearr",
    license="MIT",
    install_requires=[
        "parso",
    ],
    packages=find_packages(),
    entry_points={
        # console_scripts must map to a *list* of "name=module:callable"
        # strings; the original used a set literal ({...}), which is not
        # the documented setuptools format.
        "console_scripts": [
            "rearr=rearr.rearr:main",
        ]
    },
    classifiers=[
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
    ],
)
|
""" Passive Markers
Fluid simulation with additional marker fields that are passively transported with the fluid.
The dense marker is sampled on a regular grid while the sparse marker is a collection of particles.
"""
from phi.flow import *
DOMAIN = dict(x=64, y=64, bounds=Box(x=100, y=100))
DT = 0.2
INITIAL_LOC = math.meshgrid(x=8, y=8).pack('x,y', instance('points')) * 10. + 10.
velocity = StaggeredGrid(Noise(vector='x,y', scale=100), 0, **DOMAIN) * 4
sparse_marker = PointCloud(Sphere(INITIAL_LOC, 2), 1, 0, bounds=DOMAIN['bounds'])
dense_marker = CenteredGrid(sparse_marker.elements, ZERO_GRADIENT, x=200, y=200, bounds=DOMAIN['bounds'])
for _ in view(framerate=10, play=False, namespace=globals()).range():
velocity, _ = fluid.make_incompressible(velocity)
dense_marker = advect.advect(dense_marker, velocity, DT)
sparse_marker = advect.advect(sparse_marker, velocity, DT)
velocity = advect.semi_lagrangian(velocity, velocity, DT)
|
import argparse
import os
import sys
from django.conf import settings
from django.template import Context, Template
from django.utils.encoding import smart_str
class Command(object):
    """Base class for administrative commands.

    Every valid administrative command must derive from this class and
    implement :meth:`handle`.
    """

    help = "No help available."  # NOQA:A003

    def __init__(self, commands, verbose=False):
        """Store the command registry and prepare an argument parser."""
        self._commands = commands
        self._parser = argparse.ArgumentParser()
        self._verbose = verbose
        # Configure a minimal Django template backend on first use so
        # that _render_template() works outside a full Django project.
        if not settings.configured:
            settings.configure(TEMPLATES=[{
                "BACKEND": (
                    "django.template.backends.django.DjangoTemplates")
            }])
        self._templates_dir = "%s/templates" % os.path.dirname(__file__)

    def _render_template(self, tplfile, env):
        """Render the template file *tplfile* with context *env*."""
        with open(tplfile) as stream:
            template = Template(stream.read())
        return template.render(Context(env))

    def run(self, cmdline):
        """Parse *cmdline* and dispatch to :meth:`handle`."""
        parsed = self._parser.parse_args(cmdline)
        self.handle(parsed)

    def handle(self, parsed_args):
        """Entry point that concrete commands must implement.

        :param parsed_args: the :class:`argparse.Namespace` built by :meth:`run`
        """
        raise NotImplementedError
def scan_for_commands(dirname=""):
    """Build a dictionary containing all commands.

    :param str dirname: the directory where commands are located
    :return: a dict mapping command name to command class
    """
    path = os.path.join(os.path.dirname(__file__), dirname)
    result = {}
    for f in os.listdir(path):
        if f in [".", "..", "__init__.py"]:
            continue
        if not f.endswith(".py"):
            continue
        # Only consider regular files.  The original tested
        # ``os.path.isfile(f)`` -- relative to the *current working
        # directory* -- and skipped the entry when it *was* a file, which
        # both looked in the wrong place and inverted the intent.
        if not os.path.isfile(os.path.join(path, f)):
            continue
        cmdname = f[:-len(".py")]
        cmdmod = __import__("modoboa.core.commands", globals(), locals(),
                            [smart_str(cmdname)])
        cmdmod = getattr(cmdmod, cmdname)
        # Derive the class name: "my_cmd" -> "MyCmdCommand".  (For names
        # without "_" this is identical to str.capitalize().)
        cmdclassname = "".join(s.capitalize() for s in cmdname.split("_"))
        try:
            cmdclass = getattr(cmdmod, "%sCommand" % cmdclassname)
        except AttributeError:
            # Module does not define a matching command class; ignore it.
            continue
        result[cmdname] = cmdclass
    return result
def handle_command_line():
    """Parse the command line and dispatch to the requested command."""
    commands = scan_for_commands()
    command_list = "\n".join("\t%s" % name for name in sorted(commands))
    parser = argparse.ArgumentParser(
        description="A set of utilities to ease the installation of Modoboa.",
        epilog="""Available commands:
%s
""" % command_list)
    parser.add_argument("--verbose", action="store_true",
                        help="Activate verbose output")
    parser.add_argument("command", type=str,
                        help="A valid command name")
    args, remaining = parser.parse_known_args()
    try:
        command_class = commands[args.command]
    except KeyError:
        print("Unknown command '%s'" % args.command, file=sys.stderr)
        sys.exit(1)
    # Leftover arguments are forwarded to the command's own parser.
    command_class(commands, verbose=args.verbose).run(remaining)
|
from django import forms
class UploadTorrentForm(forms.Form):
    """Form for uploading a .torrent file together with a display title."""
    title = forms.CharField(max_length=200)  # human-readable name for the torrent
    torrent = forms.FileField()  # the .torrent metadata file itself
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'make_global_settings': [
    ['CC', '/usr/bin/clang'],
    ['CXX', '/usr/bin/clang++'],
  ],
  'targets': [
    {
      # Host iOS application; it bundles the app extension built below
      # into its PlugIns directory.
      'target_name': 'ExtensionContainer',
      'product_name': 'ExtensionContainer',
      'type': 'executable',
      'mac_bundle': 1,
      'mac_bundle_resources': [
        'ExtensionContainer/Base.lproj/Main.storyboard',
      ],
      'sources': [
        'ExtensionContainer/AppDelegate.h',
        'ExtensionContainer/AppDelegate.m',
        'ExtensionContainer/ViewController.h',
        'ExtensionContainer/ViewController.m',
        'ExtensionContainer/main.m',
      ],
      # Copy the built .appex into the container app's PlugIns folder.
      'copies': [
        {
          'destination': '<(PRODUCT_DIR)/ExtensionContainer.app/PlugIns',
          'files': [
            '<(PRODUCT_DIR)/ActionExtension.appex',
          ]}],
      'dependencies': [
        'ActionExtension'
      ],
      'link_settings': {
        'libraries': [
          '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
          '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
        ],
      },
      'xcode_settings': {
        'OTHER_CFLAGS': [
          '-fobjc-abi-version=2',
        ],
        'INFOPLIST_FILE': 'ExtensionContainer/Info.plist',
        'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
        'ARCHS': [ 'armv7' ],
        'SDKROOT': 'iphoneos',
        'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
        'CODE_SIGN_IDENTITY[sdk=iphoneos*]': 'iPhone Developer',
      },
    },
    {
      # The Action app extension itself ('ios_app_extension' marks the
      # product as an .appex bundle).
      'target_name': 'ActionExtension',
      'product_name': 'ActionExtension',
      'type': 'executable',
      'mac_bundle': 1,
      'ios_app_extension': 1,
      'sources': [
        'ActionExtension/ActionViewController.h',
        'ActionExtension/ActionViewController.m',
      ],
      'link_settings': {
        'libraries': [
          '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
          '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
          '$(SDKROOT)/System/Library/Frameworks/MobileCoreServices.framework',
        ],
      },
      'xcode_settings': {
        'OTHER_CFLAGS': [
          '-fobjc-abi-version=2',
        ],
        'INFOPLIST_FILE': 'ActionExtension/Info.plist',
        'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
        'ARCHS': [ 'armv7' ],
        'SDKROOT': 'iphoneos',
        'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
        'CODE_SIGN_IDENTITY[sdk=iphoneos*]': 'iPhone Developer',
      },
    },
  ],
}
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
import os
@slash.requires(have_ffmpeg)
@slash.requires(have_ffmpeg_vaapi_accel)
@platform_tags(ALL_PLATFORMS)
class TranscoderTest(slash.Test):
  """1:N transcode test driver for ffmpeg with VAAPI acceleration.

  A test case decodes one input stream (sw or hw) and re-encodes it into
  one or more outputs (sw or hw, optionally scaled), then PSNR-compares
  each decoded output against the decoded source.

  NOTE(review): this class relies on names (``re``, ``xrange``,
  ``get_media``, ``call``, ``calculate_psnr``, platform lists, ...) that
  come from the wildcard ``lib``/``util`` imports at the top of the file
  -- confirm ``xrange`` is provided by a py2-compat shim there.
  """
  # Requirement table: operation -> codec -> mode ("sw"/"hw") ->
  #   (supported platforms, have_* requirement result, ffmpeg codec/filter)
  requirements = dict(
    # ffmpeg-vaapi HW decoders are built-in
    decode = {
      "avc" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_decoder("h264"), "h264"),
        hw = (AVC_DECODE_PLATFORMS, have_ffmpeg_decoder("h264"), "h264"),
      ),
      "hevc-8" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_decoder("hevc"), "hevc"),
        hw = (HEVC_DECODE_8BIT_PLATFORMS, have_ffmpeg_decoder("hevc"), "hevc"),
      ),
      "mpeg2" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_decoder("mpeg2video"), "mpeg2video"),
        hw = (MPEG2_DECODE_PLATFORMS, have_ffmpeg_decoder("mpeg2video"), "mpeg2video"),
      ),
      "mjpeg" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_decoder("mjpeg"), "mjpeg"),
        hw = (JPEG_DECODE_PLATFORMS, have_ffmpeg_decoder("mjpeg"), "mjpeg"),
      ),
      "vc1" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_decoder("vc1"), "vc1"),
        hw = (VC1_DECODE_PLATFORMS, have_ffmpeg_decoder("vc1"), "vc1"),
      ),
    },
    encode = {
      "avc" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_encoder("libx264"), "libx264"),
        hw = (AVC_ENCODE_PLATFORMS, have_ffmpeg_encoder("h264_vaapi"), "h264_vaapi"),
      ),
      "hevc-8" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_encoder("libx265"), "libx265"),
        hw = (HEVC_ENCODE_8BIT_PLATFORMS, have_ffmpeg_encoder("hevc_vaapi"), "hevc_vaapi"),
      ),
      "mpeg2" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_encoder("mpeg2video"), "mpeg2video"),
        hw = (MPEG2_ENCODE_PLATFORMS, have_ffmpeg_encoder("mpeg2_vaapi"), "mpeg2_vaapi"),
      ),
      "mjpeg" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_encoder("mjpeg"), "mjpeg"),
        hw = (JPEG_ENCODE_PLATFORMS, have_ffmpeg_encoder("mjpeg_vaapi"), "mjpeg_vaapi"),
      ),
    },
    vpp = {
      "scale" : dict(
        sw = (ALL_PLATFORMS, have_ffmpeg_filter("scale"), "scale=w={width}:h={height}"),
        hw = (VPP_PLATFORMS, have_ffmpeg_filter("scale_vaapi"), "scale_vaapi=w={width}:h={height}"),
      ),
    },
  )
  # hevc implies hevc 8 bit
  requirements["encode"]["hevc"] = requirements["encode"]["hevc-8"]
  requirements["decode"]["hevc"] = requirements["decode"]["hevc-8"]
  def before(self):
    # reference context accumulated for baseline PSNR lookups
    self.refctx = []
  def get_requirements_data(self, ttype, codec, mode):
    # Look up (platforms, requirement, codec-string); a missing entry
    # yields a failed requirement and a None codec-string.
    return self.requirements[ttype].get(
      codec, {}).get(
        mode, ([], (False, "{}:{}:{}".format(ttype, codec, mode)), None))
  def get_decoder(self, codec, mode):
    # Resolve the ffmpeg decoder name; test attributes fill the template.
    _, _, decoder = self.get_requirements_data("decode", codec, mode)
    assert decoder is not None, "failed to find a suitable decoder: {}:{}".format(codec, mode)
    return decoder.format(**vars(self))
  def get_encoder(self, codec, mode):
    # Resolve the ffmpeg encoder name; test attributes fill the template.
    _, _, encoder = self.get_requirements_data("encode", codec, mode)
    assert encoder is not None, "failed to find a suitable encoder: {}:{}".format(codec, mode)
    return encoder.format(**vars(self))
  def get_vpp_scale(self, width, height, mode):
    # Build a scale filter string, or None when no scaling is requested.
    # Missing width/height fall back to the test's own dimensions.
    if width is None and height is None:
      return None
    _, _, scale = self.get_requirements_data("vpp", "scale", mode)
    assert scale is not None, "failed to find a suitable vpp scaler: {}".format(mode)
    return scale.format(width = width or self.width, height = height or self.height)
  def get_file_ext(self, codec):
    # Map codec name to the output file extension used for raw bitstreams.
    return {
      "avc"     : "h264",
      "hevc"    : "h265",
      "hevc-8"  : "h265",
      "mpeg2"   : "m2v",
      "mjpeg"   : "mjpeg",
    }.get(codec, "???")
  def validate_spec(self):
    # Validate the test-case spec and skip when the current platform or
    # the available software cannot satisfy it.
    from slash.utils.pattern_matching import Matcher
    assert len(self.outputs), "Invalid test case specification, outputs data empty"
    assert self.mode in ["sw", "hw"], "Invalid test case specification as mode type not valid"
    # generate platform list based on test runtime parameters
    iplats, ireq, _ =  self.get_requirements_data("decode", self.codec, self.mode)
    platforms = set(iplats)
    requires = [ireq,]
    for output in self.outputs:
      codec = output["codec"]
      mode  = output["mode"]
      assert mode in ["sw", "hw"], "Invalid test case specification as output mode type not valid"
      oplats, oreq, _ = self.get_requirements_data("encode", codec, mode)
      platforms &= set(oplats)
      requires.append(oreq)
      if output.get("width", None) is not None or output.get("height", None) is not None:
        oplats, oreq, _ = self.get_requirements_data("vpp", "scale", mode)
        platforms &= set(oplats)
        requires.append(oreq)
    # create matchers based on command-line filters
    matchers = [Matcher(s) for s in slash.config.root.run.filter_strings]
    # check if user supplied any platform tag on command line
    pmatch = any(map(lambda p: any([m.matches(p) for m in matchers]), ALL_PLATFORMS))
    # if user supplied a platform tag, check if this test can support it via the
    # test param-based required platforms
    if pmatch and not any(map(lambda p: any([m.matches(p) for m in matchers]), platforms)):
      slash.skip_test("unsupported platform")
    # check required
    if not all([t for t,m in requires]):
      slash.skip_test(
        "One or more software requirements not met: {}".format(
          str([m for t,m in requires if not t])))
  def gen_input_opts(self):
    # Build the ffmpeg input-side options: initialise the VAAPI device and,
    # in hw mode, select hwaccel and the explicit decoder.
    opts = "-init_hw_device vaapi=hw:/dev/dri/renderD128 -filter_hw_device hw"
    opts += " -hwaccel_output_format vaapi"
    if "hw" == self.mode:
      opts += " -hwaccel vaapi"
      opts += " -c:v {}".format(self.get_decoder(self.codec, self.mode))
    opts += " -i {source}"
    return opts.format(**vars(self))
  def gen_output_opts(self):
    # Build the output-side options for every requested output/channel and
    # record the generated file names in self.goutputs.
    self.goutputs = dict()
    opts = "-an"
    for n, output in enumerate(self.outputs):
      codec = output["codec"]
      mode  = output["mode"]
      encoder = self.get_encoder(codec, mode)
      ext = self.get_file_ext(codec)
      filters = []
      tmode = (self.mode, mode)
      # Bridge between hw and sw surfaces as needed for this output.
      if ("hw", "sw") == tmode:   # HW to SW transcode
        filters.extend(["hwdownload", "format=nv12"])
      elif ("sw", "hw") == tmode: # SW to HW transcode
        filters.append("format=nv12")
      if "hw" == mode:            # SW/HW to HW transcode
        filters.append("hwupload")
      vppscale = self.get_vpp_scale(
        output.get("width", None), output.get("height", None), mode)
      if vppscale is not None:
        filters.append(vppscale)
      for channel in xrange(output.get("channels", 1)):
        ofile = get_media()._test_artifact(
          "{}_{}_{}.{}".format(self.case, n, channel, ext))
        self.goutputs.setdefault(n, list()).append(ofile)
        if len(filters):
          opts += " -vf '{}'".format(','.join(filters))
        opts += " -c:v {}".format(encoder)
        opts += " -vframes {frames}"
        opts += " -y {}".format(ofile)
    # dump decoded source to yuv for reference comparison
    self.srcyuv = get_media()._test_artifact(
      "src_{case}.yuv".format(**vars(self)))
    if "hw" == self.mode:
      opts += " -vf 'hwdownload,format=nv12'"
    opts += " -pix_fmt yuv420p -f rawvideo"
    opts += " -vframes {frames} -y {srcyuv}"
    return opts.format(**vars(self))
  def check_output(self):
    # Scan ffmpeg's log for signs that hardware decode was not used.
    m = re.search(
      "not supported for hardware decode", self.output, re.MULTILINE)
    assert m is None, "Failed to use hardware decode"
    m = re.search(
      "hwaccel initialisation returned error", self.output, re.MULTILINE)
    assert m is None, "Failed to use hardware decode"
  def transcode(self):
    # Main entry: run the 1:N transcode, then decode every output back to
    # yuv and PSNR-check it against the decoded source.
    self.validate_spec()
    iopts = self.gen_input_opts()
    oopts = self.gen_output_opts()
    get_media().test_call_timeout = vars(self).get("call_timeout", 0)
    self.output = call("ffmpeg -v verbose {} {}".format(iopts, oopts))
    self.check_output()
    for n, output in enumerate(self.outputs):
      for channel in xrange(output.get("channels", 1)):
        encoded = self.goutputs[n][channel]
        yuv = get_media()._test_artifact(
          "{}_{}_{}.yuv".format(self.case, n, channel))
        # scale the output back to the source dimensions before comparing
        vppscale = self.get_vpp_scale(self.width, self.height, "sw")
        call(
          "ffmpeg -v verbose -i {} -vf '{}' -pix_fmt yuv420p -f rawvideo"
          " -vframes {} -y {}".format(
            encoded, vppscale, self.frames, yuv))
        self.check_metrics(yuv, refctx = [(n, channel)])
        # delete yuv file after each iteration
        get_media()._purge_test_artifact(yuv)
  def check_metrics(self, yuv, refctx):
    # Compare the given yuv output against the decoded source via PSNR.
    get_media().baseline.check_psnr(
      psnr = calculate_psnr(
        self.srcyuv, yuv,
        self.width, self.height,
        self.frames),
      context = self.refctx + refctx,
    )
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 18 23:33:01 2021
@author: chanchanchan
"""
import streamlit as st
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import DissertationPlotwithDataMain as main
import FastFouriorTransform as faft
import TransferFunction as tf
def _plot_frequency(cfg):
    """Draw the five standard figures for one input-signal frequency.

    :param cfg: dict with keys ``k`` (frequency index used to look up the
        ``faft``/``tf``/``main`` data series), ``dlambda`` (name of the
        ``tf`` attribute used as x-axis of the phase/L-lambda plot),
        ``phaseout_slice``, ``phaseout_mode``, ``phase_range``,
        ``gain_range`` and ``arrival_range``.
    """
    k = cfg['k']
    freq = main.change_in_frequency
    # Resolve the per-frequency data series by name.
    mag_in = getattr(faft, 'mag_fft_data%d_input' % k)
    mag_out = getattr(faft, 'mag_fft_data%d_output' % k)
    phase_in = getattr(faft, 'stacked_phase%din' % k)
    phase_out = getattr(faft, 'stacked_phase%dout' % k)
    d_lambda = getattr(tf, cfg['dlambda'])
    phase_trans = getattr(tf, 'stacked_phase%d_trans' % k)
    mag_trans = getattr(tf, 'mag%d_trans' % k)
    slope_trans = getattr(tf, 'slope%d_trans' % k)
    tarr = getattr(tf, 'Tarr%d_list' % k)

    # input and output gain
    gaininout = go.Figure()
    gaininout.add_trace(go.Scatter(x=freq[0:23], y=mag_in[0:23], mode='lines+markers', name='Input'))
    gaininout.add_trace(go.Scatter(x=freq[0:23], y=mag_out[0:23], mode='lines+markers', name='Output'))
    gaininout.update_layout(title={'text': "Input and Output Signal Magnitude", 'y': 0.85, 'x': 0.45, 'xanchor': 'center', 'yanchor': 'top'}, xaxis_title="Frequency (kHz)", yaxis_title="Magnitude (Arbitary Units)")
    gaininout.update_xaxes(range=[0, 11])
    gaininout.update_yaxes(range=[0, 1000])
    st.write(gaininout)

    # input and output stacked phase
    phaseinout = go.Figure()
    phaseinout.add_trace(go.Scatter(x=freq[0:23], y=phase_in[0:23], mode='markers', name='Input'))
    phaseinout.add_trace(go.Scatter(x=freq[0:23], y=phase_out[0:23], mode='markers', name='Output'))
    phaseinout.update_layout(title={'text': "Input and Output Signal Stacked Phase", 'y': 0.85, 'x': 0.45, 'xanchor': 'center', 'yanchor': 'top'}, xaxis_title="Frequency (kHz)", yaxis_title="Stacked Phase (Degrees)")
    phaseinout.update_xaxes(range=[0, 11])
    phaseinout.update_yaxes(range=[0, 4000])
    st.write(phaseinout)

    # stacked phase vs L/lambda
    sl = cfg['phaseout_slice']
    phaseout = go.Figure()
    phaseout.add_trace(go.Scatter(x=d_lambda[sl], y=phase_trans[sl], mode=cfg['phaseout_mode'], name='Output'))
    phaseout.update_layout(title={'text': "Stacked Phase of Output Signal against L/lambda", 'y': 0.85, 'x': 0.45, 'xanchor': 'center', 'yanchor': 'top'}, xaxis_title="L/lamda", yaxis_title="Stacked Phase (Degrees)")
    phaseout.update_xaxes(range=[0, 5.5])
    phaseout.update_yaxes(range=[-500, 2500])
    st.write(phaseout)

    # Gain and Phase plot vs frequency
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    fig.add_trace(go.Scatter(x=freq[0:23], y=phase_trans[0:23], mode='lines+markers', name="Stacked Phase"), secondary_y=False,)
    fig.add_trace(go.Scatter(x=freq[0:23], y=mag_trans[0:23], mode='lines+markers', name="Gain Factor"), secondary_y=True,)
    fig.update_xaxes(title_text="Frequency (kHz)")
    fig.update_yaxes(title_text="Phase (degrees)", secondary_y=False)
    fig.update_yaxes(title_text="Gain (Arbitary Units)", secondary_y=True)
    fig.update_layout(title={'text': 'Gain Factor and Stacked Phase of the Bender Element Signal', 'y': 0.85, 'x': 0.43, 'xanchor': 'center', 'yanchor': 'top'})
    fig.update_xaxes(range=[0, 11])
    fig.update_yaxes(range=cfg['phase_range'], secondary_y=False)
    fig.update_yaxes(range=cfg['gain_range'], secondary_y=True)
    st.write(fig)

    # arrival time vs frequency
    arrival = go.Figure()
    arrival.add_trace(go.Scatter(x=freq[1:23], y=slope_trans[0:22], mode='lines+markers', name='Frequency Interval = 0.49kHz'))
    arrival.add_trace(go.Scatter(x=freq[7:12], y=tarr, mode='lines', name='Frequency Interval = 1.96kHz'))
    arrival.update_layout(title={'text': "Shear Wave Arrival Time", 'y': 0.85, 'x': 0.45, 'xanchor': 'center', 'yanchor': 'top'}, xaxis_title="Frequency (kHz)", yaxis_title="Arrival Time (ms)")
    arrival.update_xaxes(range=[0, 11])
    arrival.update_yaxes(range=cfg['arrival_range'])
    st.write(arrival)


def app():
    """Render the 'Frequency Domain Interpretation' page.

    The original implementation duplicated ~45 lines of plotting code per
    input frequency; this version drives one plotting routine from a
    per-frequency configuration table while producing exactly the same
    figures (including each branch's individual axis ranges).
    """
    st.header('Frequency Domain Interpretation')
    st.subheader('Transfer Function')
    # generate different select tabs with frequencies
    signal = st.sidebar.selectbox('Frequency of Input Signal:', ['3kHz', '4kHz', '5kHz', '6kHz', '7kHz'])
    # Everything that differed between the original copy-pasted branches
    # is captured in this table.
    configs = {
        '3kHz': dict(k=3, dlambda='D_lambda3', phaseout_slice=slice(0, 23),
                     phaseout_mode='lines+markers',
                     phase_range=[-500, 2000], gain_range=[0, 4.5],
                     arrival_range=[-200, 1600]),
        '4kHz': dict(k=4, dlambda='D_lambda4', phaseout_slice=slice(0, 19),
                     phaseout_mode='lines+markers',
                     phase_range=[-500, 1600], gain_range=[0, 1.6],
                     arrival_range=[-400, 1600]),
        '5kHz': dict(k=5, dlambda='D_lambda5', phaseout_slice=slice(0, 19),
                     phaseout_mode='lines+markers',
                     phase_range=[-500, 2000], gain_range=[0, 3],
                     arrival_range=[-200, 1600]),
        # NOTE(review): the original 6kHz branch plotted tf.D_lambda3 (not
        # tf.D_lambda6) on the x-axis of the phase/L-lambda figure.  That
        # behavior is preserved here -- confirm whether it was intentional.
        '6kHz': dict(k=6, dlambda='D_lambda3', phaseout_slice=slice(0, 19),
                     phaseout_mode='lines+markers',
                     phase_range=[-500, 2500], gain_range=[0, 1.6],
                     arrival_range=[-200, 1600]),
        '7kHz': dict(k=7, dlambda='D_lambda7', phaseout_slice=slice(0, 19),
                     phaseout_mode='lines',
                     phase_range=[-500, 2000], gain_range=[0, 1.6],
                     arrival_range=[-200, 1600]),
    }
    _plot_frequency(configs[signal])
|
"""Algorithms.py File. This file contains the following classes: SeqTranslate.
SeqTranslate Class. Used for interpreting base64 representations of the target locations as well as their sequences.
To interpret these run the class instance at the bottom of the file with the desired base64 representation into the
decompress_tuple function."""
import GlobalSettings
class SeqTranslate:
    """Translate CASPER's modified-base64 encodings of target locations,
    sequences, and scores to and from their numeric/nucleotide forms.

    Nucleotide sequences are folded little-endian in base 4 (A=0, T=1, C=2,
    G=3) into an integer, then written big-endian in a modified MIME base-64
    alphabet so that '+'/'-' remain free to mark strand direction.
    """

    def __init__(self):
        # Modification of MIME base64 coding so that +- can be used for
        # strand direction.
        self.base_array_64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789=/"
        # endo name -> fields from CASPERinfo (PAM list, seed length, total length, ...)
        self.endo_info = dict()
        self.endo_import()

    # used to convert numbers in base4 back to nucleotides
    def int2nt(self, num):
        """Map a base-4 digit to its nucleotide; out-of-range values give 'N'."""
        if num == 0:
            return 'A'
        elif num == 1:
            return 'T'
        elif num == 2:
            return 'C'
        elif num == 3:
            return 'G'
        else:
            return 'N'

    def nt2int(self, nt):
        """Map a nucleotide to its base-4 digit; unknown bases map to 0 ('A')."""
        if nt == 'A':
            return 0
        elif nt == 'T':
            return 1
        elif nt == 'C':
            return 2
        elif nt == 'G':
            return 3
        else:
            return 0

    def compress(self, uncompressed, base):
        """Compress a nucleotide string or an integer into a string in *base*.

        A string input is first folded little-endian into a base-10 integer
        (position i weighted by 4**i); the integer is then rendered big-endian
        with the modified base-64 alphabet.
        """
        compseq = 0
        if type(uncompressed) == str:
            for i in range(len(uncompressed)):
                # multiplying by power-4 converts to base10
                compseq += self.nt2int(uncompressed[i]) * pow(4, i)
            uncompressed = compseq
        compreturn = str()
        while uncompressed >= base:
            uncompressed, rem = divmod(uncompressed, base)
            compreturn = self.base_array_64[rem] + compreturn
        return self.base_array_64[uncompressed] + compreturn

    def to_generic_compressed(self, seqobj):
        """Return the strand-neutral 'seed.tail' form of a compressed target.

        Accepts either a [seed, tail] pair (the tail's first character is a
        strand marker and is dropped) or a single 'seed+tail'/'seed-tail'
        string.
        """
        # Passed as a tuple from the repeats section of the .cspr file
        if type(seqobj) == list:
            gencomp = seqobj[0] + "." + seqobj[1][1:]
        else:
            split = seqobj.find("+")
            if split != -1:
                gencomp = seqobj[:split] + "." + seqobj[split + 1:]
            else:
                split = seqobj.find("-")
                gencomp = seqobj[:split] + "." + seqobj[split + 1:]
        return gencomp

    # Decompresses the base64 representation into base10. If toseq is true it
    # returns the sequence itself (nucleotides).
    def decompress64(self, base64seq, slength=0, toseq=False):
        """Decompress a modified-base64 string (or pass through an int) to base 10.

        When *toseq* is True the value is expanded little-endian in base 4
        into nucleotides and right-padded with 'A' (digit 0) to *slength*
        characters, restoring the trailing A's dropped during compression.
        """
        base10seq = 0
        if isinstance(base64seq, str):
            for i in range(len(base64seq)):
                power = len(base64seq) - (i + 1)
                index = self.base_array_64.find(base64seq[i])
                if index != -1:
                    base10seq += index * pow(64, power)
        else:
            # Already numeric: pass through unchanged.
            base10seq = base64seq
        if not toseq:
            return base10seq
        seq = str()
        number = base10seq
        while number >= 4:
            number, rem = divmod(number, 4)
            seq += self.int2nt(rem)
        seq += self.int2nt(number)
        # The for loop fixes the problem of A's not being added to the end
        # because they are removed on compression.
        for i in range(len(seq), slength):
            seq += 'A'
        return seq

    def decompress_csf_tuple(self, locseq, bool=False, endo="spCas9"):
        """Decompress one 'loc,seq,score,...' entry from a .cspr/.csf line.

        NOTE: the `bool` parameter (True when parsing a repeat-section entry
        that carries a shared seed prefix in field 3) shadows the builtin;
        the name is kept for backward compatibility with existing callers.
        Returns (location, sequence, PAM, score, strand, endo).
        """
        # Lookup endonuclease sequence lengths for parsing
        if bool == False:
            # Unique-target lines carry a trailing terminator character.
            mytuple = locseq[:-1].split(",")
        else:
            mytuple = locseq.split(",")
        front_seq = mytuple[3]
        loc = self.decompress64(mytuple[0])
        seq = mytuple[1]
        scr = self.decompress64(mytuple[2])
        strand = seq.find("+")
        if strand != -1:
            dira = "+"
            sequence = seq[:strand]
            pam = seq[strand + 1:]
        else:
            seq = seq.split("-")
            sequence = seq[0]
            pam = seq[1]
            dira = "-"
        if bool:
            # gets the tail sequence length for processing repeats
            seqlength = int(self.endo_info[endo][2]) - int(self.endo_info[endo][1])
        else:
            seqlength = int(self.endo_info[endo][2])  # gets the total sequence length
        pamlength = len(self.endo_info[endo][0].split(",")[0])  # length of the primary PAM
        sequence = self.decompress64(sequence, seqlength, True)
        pam = self.decompress64(pam, pamlength, True)
        # Repeat entries store only the tail; re-attach the shared seed.
        if bool == True:
            sequence = sequence + front_seq
        return int(loc), str(sequence), pam, int(scr), dira, endo

    def endo_import(self):
        """Parse the ENDONUCLEASES section of CASPERinfo into self.endo_info.

        BUG FIX: the original left the file handle open and spun forever at
        EOF if a section marker was missing; use a context manager and let
        iteration terminate naturally at end of file.
        """
        with open(GlobalSettings.appdir + "CASPERinfo") as f:
            # Skip forward to the ENDONUCLEASES header.
            for line in f:
                if line.startswith("ENDONUCLEASES"):
                    break
            # Read entries until the section's terminating '-' line.
            for line in f:
                if line.startswith("-"):
                    break
                myinfo = line.split(";")
                # first is PAM list, second is seed length, third is tot length
                self.endo_info[myinfo[0]] = myinfo[1:]
#S = SeqTranslate()
#print(S.decompress_csf_tuple("Dx,|S62qFEz+Qy,k", endo='asCas12'))
#print(S.decompress64("C86",False))
#print(S.compress(440159,64))
|
#!/usr/bin/env python
import pdb
import sys
sys.path.append('utils/')
from init import *
sys.path.append(pycaffe_path)
import caffe
import io
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
import time
import glob
import pickle as pkl
import random
import h5py
from multiprocessing import Pool
from threading import Thread
import skimage.io
import copy
import json
import time
import re
import math
UNK_IDENTIFIER = '<unk>'
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)')
def read_json(t_file):
    """Load and return the JSON document stored at path *t_file*.

    Uses a context manager so the handle is closed promptly (the original
    leaked the file object returned by open())."""
    with open(t_file) as j_file:
        return json.load(j_file)
def split_sentence(sentence):
    """Tokenize *sentence* into lower-cased word/punctuation pieces and drop
    a trailing period token.

    Pieces that are only whitespace are discarded; other pieces are kept
    as-is (lower-cased but not stripped), matching the original contract.
    """
    pieces = SENTENCE_SPLIT_REGEX.split(sentence.strip())
    tokens = [piece.lower() for piece in pieces if piece.strip()]
    if tokens[-1] == '.':
        return tokens[:-1]
    return tokens
def check_sublist(sublist, biglist):
    """Return True iff *sublist* minus its final token (the EOS marker)
    occurs contiguously inside *biglist*."""
    window = len(sublist) - 1
    target = sublist[:-1]
    return any(tuple(biglist[start:start + window]) == target
               for start in range(len(biglist)))
def tokenize_text(sentence, vocabulary, leave_out_unks=False):
    """Convert a sentence into a list of vocabulary indices.

    Words missing from *vocabulary* map to the '<unk>' entry when it exists;
    with leave_out_unks=True they are silently skipped instead.  An 'EOS'
    index is appended unless leave_out_unks is True.
    """
    words = [w.strip() for w in split_sentence(sentence)]
    token_sent = []
    for word in words:
        if word in vocabulary:
            token_sent.append(vocabulary[word])
        elif not leave_out_unks and '<unk>' in vocabulary:
            token_sent.append(vocabulary['<unk>'])
    if not leave_out_unks:
        token_sent.append(vocabulary['EOS'])
    return token_sent
def open_vocab(vocab_txt):
    """Read a vocabulary file (one token per line) into a token -> index dict.

    Index 0 is reserved for the implicit 'EOS' token; tokens from the file
    start at index 1.  Uses a context manager (the original leaked the file
    handle returned by open()).
    """
    with open(vocab_txt) as f:
        vocab_list = ['EOS'] + [v.strip() for v in f.readlines()]
    return {v: iv for iv, v in enumerate(vocab_list)}
def cub_labels(annotation):
    """Map a CUB annotation to its zero-based class label.

    CUB image ids look like '012.Some_Species/...': the numeric prefix
    before the first '.' is the one-based class number.
    """
    class_number = annotation['image_id'].split('.')[0]
    return int(class_number) - 1
def cub_labels_gen(annotation, labels):
    """Look up the (predicted) class label for an annotation's image id."""
    image_id = annotation['image_id']
    return labels[image_id]
def textPreprocessor(params):
#input:
# params['caption_json']: text json which contains text and a path to an image if the text is grounded in an image
# params['vocabulary']: vocabulary txt to use
# params['label_extract']: dataset to direct label extraction or None
#output:
# processed_text: tokenized text with corresponding image path (if they exist)
#make vocabulary dict
vocab = open_vocab(params['vocabulary'])
json_text = read_json(params['caption_json'])
processed_text = {}
if 'label_extract' not in params.keys(): params['label_extract'] = None
if 'length_label' not in params.keys(): params['length_label'] = None
if params['label_extract'] == 'CUB_gen':
#read in predicted labels
labels = pkl.load('/x/lisaanne/finegrained/CUB_label_dict.p')
cub_labels_gen_prep = lambda x: cub_labels_gen(x, labels)
else:
cub_labels_gen_prep = cub_labels
label_function = {'CUB': cub_labels, 'CUB_gen': cub_labels_gen_prep}
t = time.time()
for annotation in json_text['annotations']:
processed_text[annotation['id']] = {}
processed_text[annotation['id']]['text'] = tokenize_text(annotation['caption'], vocab)
if 'image_id' in annotation.keys():
processed_text[annotation['id']]['image'] = annotation['image_id']
if params['label_extract']:
processed_text[annotation['id']]['label'] = label_function[params['label_extract']](annotation)
if params['length_label']:
processed_text[annotation['id']]['length'] = len(annotation['caption'])
print "Setting up text dict: ", time.time()-t
return processed_text
class extractData(object):
    """Base class for batch extractors.

    Subclasses provide data_list, num_data, batch_size, iteration, and a
    get_data(batch) method; this base cycles through the data, reshuffling
    data_list at each epoch boundary.
    """

    def increment(self):
        """Return the next batch of identifiers, wrapping (and reshuffling
        data_list) when an epoch boundary is crossed."""
        # uses iteration, batch_size, data_list, and num_data to extract
        # next batch identifiers
        next_batch = [None] * self.batch_size
        if self.iteration + self.batch_size >= self.num_data:
            # Epoch boundary: drain the current pass, then borrow from the
            # start of the list before reshuffling for the next pass.
            leftover = self.num_data - self.iteration
            carried = self.batch_size - leftover
            next_batch[:leftover] = self.data_list[self.iteration:]
            next_batch[leftover:] = self.data_list[:carried]
            random.shuffle(self.data_list)
            # BUG FIX: the original set iteration to `leftover` (the count
            # taken from the *old* epoch), which skipped or repeated items
            # whenever leftover != carried (e.g. it left iteration ==
            # batch_size when the epoch divided evenly).  Track how many
            # items the new epoch has already contributed instead.
            self.iteration = carried
        else:
            next_batch = self.data_list[self.iteration:self.iteration + self.batch_size]
            self.iteration += self.batch_size
        assert self.iteration > -1
        assert len(next_batch) == self.batch_size
        return next_batch

    def advanceBatch(self):
        """Fetch the next batch ids and hand them to the subclass's get_data."""
        next_batch = self.increment()
        self.get_data(next_batch)
class extractFeatureText(extractData):
def __init__(self, dataset, params, result):
self.extractType = 'text'
self.data_list = dataset.keys()
self.num_data = len(self.data_list)
print 'For extractor extractText, length of data is: ', self.num_data
self.dataset = dataset
self.iteration = 0
self.image_dim = params['image_dim']
self.batch_size = params['batch_size']
self.stream_size = params['stream_size']
if not 'feature_size' in params.keys():
params['feature_size'] = 8192
assert 'features' in params.keys()
self.feature_size = params['feature_size']
self.features = params['features']
#preperation to output top
self.text_data_key = params['text_data_key']
self.text_label_key = params['text_label_key']
self.marker_key = params['text_marker_key']
self.image_data_key = params['image_data_key']
self.top_keys = [self.text_data_key, self.text_label_key, self.marker_key, self.image_data_key]
self.batch_size = params['batch_size']
self.stream_size = params['stream_size']
self.top_shapes = [(self.stream_size, self.batch_size), (self.stream_size, self.batch_size), (self.stream_size, self.batch_size), (self.batch_size, self.feature_size)]
self.result = result
def get_data(self, next_batch):
batch_images = [self.dataset[nb]['image'] for nb in next_batch]
next_batch_input_sentences = np.zeros((self.stream_size, self.batch_size))
next_batch_target_sentences = np.ones((self.stream_size, self.batch_size))*-1
next_batch_markers = np.ones((self.stream_size, self.batch_size))
next_batch_image_data = np.ones((self.batch_size, self.feature_size))
next_batch_markers[0,:] = 0
for ni, nb in enumerate(next_batch):
ns = self.dataset[nb]['text']
num_words = len(ns)
ns_input = ns[:min(num_words, self.stream_size-1)]
ns_target = ns[:min(num_words, self.stream_size)]
next_batch_input_sentences[1:min(num_words+1, self.stream_size), ni] = ns_input
next_batch_target_sentences[:min(num_words, self.stream_size), ni] = ns_target
for ni, nb in enumerate(batch_images):
next_batch_image_data[ni,...] = self.features[nb]
self.result[self.text_data_key] = next_batch_input_sentences
self.result[self.text_label_key] = next_batch_target_sentences
self.result[self.marker_key] = next_batch_markers
self.result[self.image_data_key] = next_batch_image_data
class extractPhraseClassData(extractData):
    """Extractor for noun-phrase vs. image classification training.

    Each batch item yields samples_per_image (5) phrase samples: one positive
    noun phrase (a phrase of the image's class that occurs in the caption)
    and four negatives drawn from outside the class, each paired with the
    image's precomputed feature vector and a 0/1 label placed at the phrase's
    final time step.
    """
    def __init__(self, dataset, params, result):
        self.extractType = 'text'
        # One positive + four negative phrase samples per image.
        samples_per_image = 5
        self.samples_per_image = samples_per_image
        self.data_list = dataset.keys()
        self.num_data = len(self.data_list)
        print 'For extractor extractPhraseClassData, length of data is: ', self.num_data
        self.dataset = dataset
        self.iteration = 0
        self.image_dim = params['image_dim']
        self.batch_size = params['batch_size']
        self.stream_size = params['stream_size']
        if not 'feature_size' in params.keys():
            params['feature_size'] = 8192
        assert 'features' in params.keys()
        self.feature_size = params['feature_size']
        self.features = params['features']
        # preparation to output tops
        self.noun_phrases_key = params['noun_phrases_key']
        self.marker_key = params['text_marker_key']
        self.label_key = params['label_key']
        self.image_data_key = params['image_data_key']
        self.top_keys = [self.noun_phrases_key, self.marker_key, self.label_key, self.image_data_key]
        self.batch_size = params['batch_size']
        self.stream_size = params['stream_size']
        # class id -> list of that class's noun phrases, tokenized to id tuples
        self.class_phrase_dict = pkl.load(open(params['class_phrase_dict'], 'r'))
        vocab = open_vocab(params['vocabulary'])
        for c in self.class_phrase_dict:
            for i, phrase in enumerate(self.class_phrase_dict[c]):
                self.class_phrase_dict[c][i] = tuple(tokenize_text(phrase, vocab))
        # full candidate phrase list; negatives are sampled from here
        self.noun_phrases = open(params['noun_phrase_list']).readlines()
        self.noun_phrases = [noun_phrase.strip() for noun_phrase in self.noun_phrases]
        self.noun_phrases = [tuple(tokenize_text(noun_phrase, vocab)) for noun_phrase in self.noun_phrases]
        # Tops are sized for batch_size * samples_per_image phrase samples.
        self.top_shapes = [(self.stream_size, self.batch_size*samples_per_image),
                           (self.stream_size, self.batch_size*samples_per_image),
                           (self.stream_size, self.batch_size*samples_per_image),
                           (self.batch_size*samples_per_image, self.feature_size)]
        self.result = result
    def get_data(self, next_batch):
        """Fill result with phrase tokens (shifted right by one), markers,
        binary labels at each phrase's last step, and image features."""
        batch_images = [self.dataset[nb]['image'] for nb in next_batch]
        next_batch_noun_phrases = np.zeros((self.stream_size, self.batch_size*self.samples_per_image))
        next_batch_markers = np.ones((self.stream_size, self.batch_size*self.samples_per_image))
        # Labels default to -1 (ignored) everywhere except the final step.
        next_batch_labels = np.ones((self.stream_size, self.batch_size*self.samples_per_image))*-1
        next_batch_image_data = np.ones((self.batch_size*self.samples_per_image, self.feature_size))
        next_batch_markers[0,:] = 0
        for ni, nb in enumerate(next_batch):
            # Slot 0 of this image's group is the positive sample.
            idx = ni*self.samples_per_image
            # get positive sentence
            sentence = self.dataset[nb]['text']
            # CUB image ids are '<class>.<rest>'; class numbers are one-based.
            c = int(batch_images[ni].split('.')[0])-1
            sentence_phrases = []
            class_phrases = self.class_phrase_dict[c]
            for phrase in class_phrases:
                if check_sublist(phrase, sentence):
                    sentence_phrases.append(phrase)
            if len(sentence_phrases) == 0:
                # Fallback when no class phrase occurs in the caption;
                # presumably token 276 is a generic phrase — TODO confirm
                # against the vocabulary.
                sentence_phrases = [(276, 0)]
            # Drop phrases that are sub-phrases of another matched phrase so
            # only maximal phrases remain.
            clean_sentence_phrases = []
            for i, phrase in enumerate(sentence_phrases):
                subphrase = False
                for phrase2 in sentence_phrases[:i] + sentence_phrases[i+1:]:
                    if check_sublist(phrase, phrase2):
                        subphrase = True
                if not subphrase:
                    clean_sentence_phrases.append(phrase)
            clean_sentence_phrases = list(set(clean_sentence_phrases))
            # Pick one maximal phrase at random as the positive sample.
            random.shuffle(clean_sentence_phrases)
            sentence_phrase = clean_sentence_phrases[0]
            num_words = len(sentence_phrase)
            ns_input = sentence_phrase[:min(num_words, self.stream_size-1)]
            next_batch_noun_phrases[1:min(num_words+1, self.stream_size), idx] = ns_input
            # NOTE(review): min(num_words-1, self.stream_size) can index row
            # stream_size, one past the last valid row of a (stream_size, N)
            # array — looks like it should clamp to stream_size-1; confirm
            # phrase lengths never exceed stream_size.
            next_batch_labels[min(num_words-1, self.stream_size), idx] = 1
            next_batch_image_data[idx,...] = self.features[batch_images[ni]]
            # Negatives: phrases not associated with this image's class.
            negative_phrases = list(set(self.noun_phrases)-set(self.class_phrase_dict[c]))
            random.shuffle(negative_phrases)
            for i in range(1,5):
                idx = ni*self.samples_per_image + i
                sentence_phrase = negative_phrases[i-1]
                num_words = len(sentence_phrase)
                ns_input = sentence_phrase[:min(num_words, self.stream_size-1)]
                next_batch_noun_phrases[1:min(num_words+1, self.stream_size), idx] = ns_input
                next_batch_labels[min(num_words-1, self.stream_size), idx] = 0
                next_batch_image_data[idx,...] = self.features[batch_images[ni]]
        self.result[self.noun_phrases_key] = next_batch_noun_phrases
        self.result[self.marker_key] = next_batch_markers
        self.result[self.label_key] = next_batch_labels
        self.result[self.image_data_key] = next_batch_image_data
class extractImageText(extractData):
    """Extractor pairing tokenized sentences with raw images read from disk
    and run through a caffe preprocessing transformer, optionally using a
    worker pool to load/preprocess images in parallel."""
    def __init__(self, dataset, params, result):
        self.extractType = 'text'
        self.data_list = dataset.keys()
        self.num_data = len(self.data_list)
        print 'For extractor extractText, length of data is: ', self.num_data
        self.dataset = dataset
        self.iteration = 0
        self.image_dim = params['image_dim']
        self.batch_size = params['batch_size']
        self.stream_size = params['stream_size']
        # Root directory that dataset image ids are joined onto.
        self.base_path = params['base_image_path']
        # prep to process image
        # define_transformer / imageProcessor come from `from init import *`
        # (utils/init.py) — presumably caffe preprocessing helpers; confirm
        # their contract there.
        image_data_shape = (self.batch_size, 3, self.image_dim, self.image_dim)
        self.transformer = define_transformer(params['image_data_key'], image_data_shape, self.image_dim)
        self.imageProcessor = imageProcessor(self.transformer, self.image_dim, params['image_data_key'])
        # preparation to output tops
        self.text_data_key = params['text_data_key']
        self.text_label_key = params['text_label_key']
        self.marker_key = params['text_marker_key']
        self.image_data_key = params['image_data_key']
        self.top_keys = [self.text_data_key, self.text_label_key, self.marker_key, self.image_data_key]
        self.batch_size = params['batch_size']
        self.stream_size = params['stream_size']
        self.top_shapes = [(self.stream_size, self.batch_size), (self.stream_size, self.batch_size), (self.stream_size, self.batch_size), image_data_shape]
        self.result = result
        # Worker pool for parallel image loading/preprocessing.
        self.pool_size = 4
        self.pool = Pool(processes=self.pool_size)
    def get_data(self, next_batch):
        """Fill result with shifted input tokens, -1-padded targets, markers
        (0 at t=0), and the preprocessed image of each batch item."""
        batch_images = ['/'.join([self.base_path, self.dataset[nb]['image']]) for nb in next_batch]
        next_batch_input_sentences = np.zeros((self.stream_size, self.batch_size))
        next_batch_target_sentences = np.ones((self.stream_size, self.batch_size))*-1
        next_batch_markers = np.ones((self.stream_size, self.batch_size))
        next_batch_image_data = np.ones((self.batch_size, 3, self.image_dim, self.image_dim))
        # Marker row 0 is zero so the recurrent state resets per sequence.
        next_batch_markers[0,:] = 0
        for ni, nb in enumerate(next_batch):
            ns = self.dataset[nb]['text']
            num_words = len(ns)
            # Inputs are shifted right one step (slot 0 stays 0 as BOS);
            # targets keep the unshifted tokens, trailing slots stay -1.
            ns_input = ns[:min(num_words, self.stream_size-1)]
            ns_target = ns[:min(num_words, self.stream_size)]
            next_batch_input_sentences[1:min(num_words+1, self.stream_size), ni] = ns_input
            next_batch_target_sentences[:min(num_words, self.stream_size), ni] = ns_target
        if self.pool_size > 1:
            # Preprocess the whole batch of images in parallel.
            next_batch_images_list = self.pool.map(self.imageProcessor, batch_images)
            for ni in range(len(next_batch)):
                next_batch_image_data[ni,...] = next_batch_images_list[ni]
        else:
            for ni, nb in enumerate(batch_images):
                next_batch_image_data[ni,...] = self.imageProcessor(nb)
        self.result[self.text_data_key] = next_batch_input_sentences
        self.result[self.text_label_key] = next_batch_target_sentences
        self.result[self.marker_key] = next_batch_markers
        self.result[self.image_data_key] = next_batch_image_data
class extractText(extractData):
def __init__(self, dataset, params, result):
self.extractType = 'text'
self.data_list = dataset.keys()
self.num_data = len(self.data_list)
print 'For extractor extractText, length of data is: ', self.num_data
self.dataset = dataset
self.iteration = 0
self.batch_size = params['batch_size']
self.stream_size = params['stream_size']
#preperation to output top
self.text_data_key = params['text_data_key']
self.text_label_key = params['text_label_key']
self.marker_key = params['text_marker_key']
self.top_keys = [self.text_data_key, self.text_label_key, self.marker_key]
self.batch_size = params['batch_size']
self.stream_size = params['stream_size']
self.top_shapes = [(self.stream_size, self.batch_size), (self.stream_size, self.batch_size), (self.stream_size, self.batch_size)]
self.result = result
def get_data(self, next_batch):
next_batch_input_sentences = np.zeros((self.stream_size, self.batch_size))
next_batch_target_sentences = np.ones((self.stream_size, self.batch_size))*-1
next_batch_markers = np.ones((self.stream_size, self.batch_size))
next_batch_markers[0,:] = 0
for ni, nb in enumerate(next_batch):
ns = self.dataset[nb]['text']
num_words = len(ns)
ns_input = ns[:min(num_words, self.stream_size-1)]
ns_target = ns[:min(num_words, self.stream_size)]
next_batch_input_sentences[1:min(num_words+1, self.stream_size), ni] = ns_input
next_batch_target_sentences[:min(num_words, self.stream_size), ni] = ns_target
self.result[self.text_data_key] = next_batch_input_sentences
self.result[self.text_label_key] = next_batch_target_sentences
self.result[self.marker_key] = next_batch_markers
class extractLabel(extractData):
    """Extractor emitting per-item class labels, optionally in one of three
    formats ('number', 'onehot', 'vector') and optionally replicated across a
    stream of time steps.  When params['data_label_n'] is given it also emits
    a randomly drawn *different* class label (negative sampling)."""
    def __init__(self, dataset, params, result):
        self.data_list = dataset.keys()
        self.num_data = len(self.data_list)
        print 'For extractor extractText, length of data is: ', self.num_data
        if 'label_format' not in params.keys():
            params['label_format'] = 'number'
        self.label_format = params['label_format'] #options: number, onehot, vector
        #number is just a number label
        #onehot is a onehot binary vector
        #vector requires a look up which maps number to continuous valued vector
        if self.label_format == 'vector':
            assert 'vector_file' in params.keys()
            lookup_file = params['vector_file'] #should be pkl file with a single matrix in it that is Label X EmbeddingD
            self.lookup = pkl.load(open(lookup_file, 'r'))
        if self.label_format == 'onehot':
            assert 'size_onehot' in params.keys()
        # determine label_size (width of one label entry)
        if self.label_format == 'number':
            self.label_size = 1
        elif self.label_format == 'onehot':
            self.label_size = params['size_onehot']
        elif self.label_format == 'vector':
            self.label_size = self.lookup.shape[1]
        self.dataset = dataset
        self.iteration = 0
        self.batch_size = params['batch_size']
        if 'label_stream_size' in params.keys():
            self.stream_size = params['label_stream_size']
        else:
            self.stream_size = 1
        self.supervision = params['sentence_supervision'] #'all' or 'last'
        # preparation to output tops
        self.label_key = params['data_label']
        self.label_key_n = None
        self.top_keys = [self.label_key]
        if 'data_label_n' in params.keys():
            self.label_key_n = params['data_label_n']
            self.top_keys.append(self.label_key_n)
        if self.stream_size == 1:
            self.top_shapes = [(self.batch_size,self.label_size)]
            if self.label_key_n:
                self.top_shapes.append((self.batch_size, self.label_size))
        else:
            if self.label_size > 1:
                self.top_shapes = [(self.stream_size, self.batch_size, self.label_size)]
                if self.label_key_n:
                    # NOTE(review): this appends a *list containing* a tuple,
                    # unlike every other branch which appends a bare tuple —
                    # confirm downstream shape handling tolerates it.
                    self.top_shapes.append([(self.stream_size, self.batch_size, self.label_size)])
            else:
                self.top_shapes = [(self.stream_size, self.batch_size)]
                if self.label_key_n:
                    self.top_shapes.append((self.stream_size, self.batch_size))
        self.result = result
    def number_label(self, nb):
        # 'number' format: the label is used as-is.
        return nb
    def onehot_label(self, nb):
        # 'onehot' format: 1.0 at the label's index, zeros elsewhere.
        l = np.zeros((self.label_size,))
        l[nb] = 1
        return l
    def vector_label(self, nb):
        # 'vector' format: row of the Label x EmbeddingD lookup matrix.
        return self.lookup[nb,:]
    def get_data(self, next_batch):
        """Fill result with labels for the batch.

        supervision == 'all' writes the label at every position;
        supervision == 'last' writes it only at the EOS position of the
        (previously computed) 'target_sentence' top.
        """
        label_transform_dict = {'number': self.number_label, 'onehot': self.onehot_label, 'vector': self.vector_label}
        label_transform = label_transform_dict[self.label_format]
        # -1 everywhere means "ignore" to the loss layer.
        next_batch_labels = np.ones((self.top_shapes[0]))*-1
        if self.label_key_n:
            next_batch_labels_n = np.ones((self.top_shapes[0]))*-1
        for ni, nb in enumerate(next_batch):
            gt_label = self.dataset[nb]['label']
            # specific to cub: draw a random label different from the ground
            # truth (negative label for contrastive training; 200 classes)
            if self.label_key_n:
                n_label = gt_label
                while (n_label == gt_label):
                    label_list = range(200)
                    random.shuffle(label_list)
                    n_label = label_list[0]
            nl = label_transform(gt_label)
            if (self.supervision == 'all') & (self.stream_size > 1):
                next_batch_labels[:, ni] = nl
            if (self.supervision == 'last') & (self.stream_size > 1):
                # requires that 'target_sentence' was computed by an earlier
                # extractor in the same result dict; the label goes at the
                # first position whose target token is 0 (EOS).
                # NOTE(review): the parenthesis is misplaced —
                # len(np.where(...)[0] > 0) takes the length of a boolean
                # mask, which equals len(indices); it happens to behave like
                # the intended len(np.where(...)[0]) > 0 (nonzero iff any
                # match), but should be rewritten for clarity.
                if len(np.where(self.result['target_sentence'][:,ni] == 0)[0] > 0):
                    next_batch_labels[np.where(self.result['target_sentence'][:,ni] == 0)[0][0], ni] = nl
                else:
                    next_batch_labels[-1, ni] = nl
            if (self.supervision == 'all') & (self.stream_size == 1):
                next_batch_labels[ni,:] = nl
            if (self.supervision == 'last') & (self.stream_size == 1):
                raise Exception("Cannot have 'last' supervision type if stream size is 1")
            if self.label_key_n:
                # Same placement rules for the negative label.
                nl = label_transform(n_label)
                if (self.supervision == 'all') & (self.stream_size > 1):
                    next_batch_labels_n[:, ni] = nl
                if (self.supervision == 'last') & (self.stream_size > 1):
                    # requires that 'target_sentence' computed somewhere
                    if len(np.where(self.result['target_sentence'][:,ni] == 0)[0] > 0):
                        next_batch_labels_n[np.where(self.result['target_sentence'][:,ni] == 0)[0][0], ni] = nl
                    else:
                        next_batch_labels_n[-1, ni] = nl
                if (self.supervision == 'all') & (self.stream_size == 1):
                    next_batch_labels_n[ni,:] = nl
                if (self.supervision == 'last') & (self.stream_size == 1):
                    raise Exception("Cannot have 'last' supervision type if stream size is 1")
        self.result[self.label_key] = next_batch_labels
        if self.label_key_n:
            self.result[self.label_key_n] = next_batch_labels_n
class extractLength(extractData):
    """Extractor emitting the caption-length value for each batch item."""

    def __init__(self, dataset, params, result):
        self.data_list = dataset.keys()
        self.num_data = len(self.data_list)
        self.dataset = dataset
        self.iteration = 0
        self.batch_size = params['batch_size']
        # Single top: one length value per batch item.
        self.length_key = params['length_label']
        self.top_keys = [self.length_key]
        self.top_shapes = [(self.batch_size,)]
        self.result = result

    def get_data(self, next_batch):
        """Fill result[length_key] with each item's 'length' field."""
        lengths = np.ones((self.top_shapes[0])) * -1
        for slot, item_id in enumerate(next_batch):
            lengths[slot] = self.dataset[item_id]['length']
        self.result[self.length_key] = lengths
class extractMulti(extractData):
    """Composite extractor: advances several extractors over the same dataset
    with one shared batch of identifiers (e.g. image descriptions plus their
    image labels).

    It exposes the union of its children's top keys/shapes; each child writes
    its own outputs into the shared result dict.
    """

    def __init__(self, dataset, params, result):
        # Only the bookkeeping needed by extractData.increment() lives here;
        # the child extractors own their outputs.
        self.extractors = params['extractors']
        self.batch_size = params['batch_size']  # (was assigned twice; once suffices)
        self.data_list = dataset.keys()
        self.num_data = len(self.data_list)
        self.dataset = dataset
        self.iteration = 0
        self.top_keys = []
        self.top_shapes = []
        for e in self.extractors:
            self.top_keys.extend(e.top_keys)
            self.top_shapes.extend(e.top_shapes)

    def get_data(self, next_batch):
        """Delegate the same batch of identifiers to every child extractor."""
        # (removed an unused time.time() timer left over from profiling)
        for e in self.extractors:
            e.get_data(next_batch)
class batchAdvancer(object):
    """Callable that advances every registered extractor by one batch; used
    as the target of the data layer's prefetch thread."""

    def __init__(self, extractors):
        self.extractors = extractors

    def __call__(self):
        # Advancing a batch simply asks each extractor for its next batch.
        for extractor in self.extractors:
            extractor.advanceBatch()
class python_data_layer(caffe.Layer):
    """Base caffe Python data layer with single-thread batch prefetching.

    Subclasses implement setup_extractors() to populate self.data_extractors;
    setup() then derives the top names/shapes from the extractors, starts a
    background thread filling self.thread_result, and forward() copies the
    prefetched arrays into the layer tops before dispatching the next batch.
    """
    def setup(self, bottom, top):
        # Fixed seed so batch order/shuffling is reproducible across runs.
        random.seed(10)
        # NOTE(review): eval() on the layer's param_str executes arbitrary
        # code from the prototxt — acceptable for trusted configs only.
        self.params = eval(self.param_str)
        params = self.params
        # set up prefetching
        self.thread_result = {}
        self.thread = None
        self.setup_extractors()
        self.batch_advancer = batchAdvancer(self.data_extractors)
        self.top_names = []
        self.top_shapes = []
        for de in self.data_extractors:
            self.top_names.extend(de.top_keys)
            self.top_shapes.extend(de.top_shapes)
        # Kick off the first prefetch so shapes/data are ready below.
        self.dispatch_worker()
        if 'top_names' in params.keys():
            # check top names equal to each other...
            if not (set(params['top_names']) == set(self.top_names)):
                raise Exception("Input 'top names' not the same as determined top names.")
        else:
            # NOTE(review): '==' makes this a no-op comparison, and since
            # 'top_names' is absent in this branch params['top_names'] would
            # raise KeyError anyway — presumably an assignment belonging to
            # the other branch (to honor the caller's ordering); confirm
            # intent before relying on this path.
            self.top_names == params['top_names']
        print self.top_names
        print 'Outputs:', self.top_names
        if len(top) != len(self.top_names):
            raise Exception('Incorrect number of outputs (expected %d, got %d)' %
                            (len(self.top_names), len(top)))
        # Wait for the first batch so top shapes can be applied.
        self.join_worker()
        #for top_index, name in enumerate(self.top_names.keys()):
        for top_index, name in enumerate(self.top_names):
            shape = self.top_shapes[top_index]
            print 'Top name %s has shape %s.' %(name, shape)
            top[top_index].reshape(*shape)
    def reshape(self, bottom, top):
        # Tops are reshaped once in setup(); nothing to do per-iteration.
        pass
    def forward(self, bottom, top):
        """Copy the prefetched batch into the tops, then prefetch the next."""
        if self.thread is not None:
            self.join_worker()
        for top_index, name in zip(range(len(top)), self.top_names):
            top[top_index].data[...] = self.thread_result[name]
        self.dispatch_worker()
    def dispatch_worker(self):
        # Start the background thread that fills thread_result.
        assert self.thread is None
        self.thread = Thread(target=self.batch_advancer)
        self.thread.start()
    def join_worker(self):
        # Block until the prefetch thread finishes, then clear it.
        assert self.thread is not None
        self.thread.join()
        self.thread = None
    def backward(self, top, propagate_down, bottom):
        # Data layers have no gradient to propagate.
        pass
class CaptionToLabel(python_data_layer):
    """Data layer for training a sentence classifier: tokenized captions plus
    their class labels."""

    def setup_extractors(self):
        params = self.params
        # caption_json and vocabulary are mandatory; everything else defaults.
        assert 'caption_json' in params.keys()
        assert 'vocabulary' in params.keys()
        defaults = {
            'text_data_key': 'input_sentence',
            'text_label_key': 'target_sentence',
            'text_marker_key': 'cont_sentence',
            'data_label': 'data_label',
            'batch_size': 100,
            'stream_size': 20,
            'sentence_supervision': 'all',  # 'all' vs. 'last'
            'label_extract': 'CUB',
        }
        for key, value in defaults.items():
            if key not in params.keys():
                params[key] = value
        data = textPreprocessor(params)
        text_extractor = extractText(data, params, self.thread_result)
        if 'label_stream_size' not in params.keys():
            params['label_stream_size'] = params['stream_size']
        if 'data_label_feat' in params.keys():
            # When a label-feature key is supplied it replaces the plain
            # label key for the label extractor.
            data_label = params['data_label']
            params['data_label'] = params['data_label_feat']
        label_extractor = extractLabel(data, params, self.thread_result)
        params['extractors'] = [text_extractor, label_extractor]
        self.data_extractors = [extractMulti(data, params, self.thread_result)]
class extractGVEFeatures(python_data_layer):
    """Data layer for generating visual explanations.

    Tops: input_sentence (input words per step), target_sentence (target
    words per step), image features, the numeric class label, and the
    class-embedding label feature.
    """

    def setup_extractors(self):
        params = self.params
        # caption_json and vocabulary are mandatory; everything else defaults.
        assert 'caption_json' in params.keys()
        assert 'vocabulary' in params.keys()
        defaults = {
            'text_data_key': 'input_sentence',
            'text_label_key': 'target_sentence',
            'text_marker_key': 'cont_sentence',
            'data_label': 'data_label',
            'data_label_feat': 'data_label_feat',
            'image_data_key': 'image_data',
            'batch_size': 100,
            'stream_size': 20,
            'image_dim': 227,
            'sentence_supervision': 'all',  # 'all' vs. 'last'
            'label_extract': 'CUB',
        }
        for key, value in defaults.items():
            if key not in params.keys():
                params[key] = value
        #assert 'vector_file' in params.keys()
        data = textPreprocessor(params)
        features = pkl.load(open(cub_features, 'r'))
        params['features'] = features
        imageText_extractor = extractFeatureText(data, params, self.thread_result)
        # Labels occupy a single time step.
        params['stream_size'] = 1
        # extract number label for loss layer
        params['label_format'] = 'number'
        label_extractor_number = extractLabel(data, params, self.thread_result)
        # then the embedding-vector label under the feature key
        params['data_label'] = params['data_label_feat']
        params['label_format'] = 'vector'
        label_extractor_vector = extractLabel(data, params, self.thread_result)
        params['extractors'] = [imageText_extractor, label_extractor_number, label_extractor_vector]
        self.data_extractors = [extractMulti(data, params, self.thread_result)]
class nounPhraseIdentification(python_data_layer):
    """Data layer for deciding whether a noun phrase matches an image.

    Tops: noun phrase tokens, markers, binary labels, and image features.
    """

    def setup_extractors(self):
        params = self.params
        # caption_json and vocabulary are mandatory; everything else defaults.
        assert 'caption_json' in params.keys()
        assert 'vocabulary' in params.keys()
        defaults = {
            'noun_phrases_key': 'noun_phrases',
            # NOTE(review): extractPhraseClassData reads
            # params['text_marker_key'], not 'marker_key' — confirm callers
            # supply text_marker_key or this default is never used.
            'marker_key': 'marker',
            'label_key': 'label',
            'image_data_key': 'image_data',
            'batch_size': 100,
            'stream_size': 20,
            'image_dim': 227,
            'sentence_supervision': 'last',  # 'all' vs. 'last'
            'label_extract': 'CUB',
            'class_phrase_dict': 'data/class_phrase_dict.p',
            'noun_phrase_list': 'data/noun_phrases_clean.txt',
        }
        for key, value in defaults.items():
            if key not in params.keys():
                params[key] = value
        #assert 'vector_file' in params.keys()
        # divide by 5 so we have room for negatives
        # (Python 2 integer division)
        params['batch_size'] = params['batch_size']/5
        data = textPreprocessor(params)
        features = pkl.load(open(cub_features, 'r'))
        params['features'] = features
        phrase_extractor = extractPhraseClassData(data, params, self.thread_result)
        params['extractors'] = [phrase_extractor]
        self.data_extractors = [extractMulti(data, params, self.thread_result)]
class extractSentencePairFeatures(python_data_layer):
    """Data layer producing the inputs for generating visual explanations:

      input_sentence  -- input word ids per time step
      target_sentence -- target word ids per time step
      image_data      -- image features
      label_p/label_n -- positive/negative class label features
    """

    def setup_extractors(self):
        params = self.params
        # Required inputs.
        assert 'caption_json' in params
        assert 'vocabulary' in params
        # Optional inputs with their defaults.
        defaults = {
            'text_data_key': 'input_sentence',
            'text_label_key': 'target_sentence',
            'text_marker_key': 'cont_sentence',
            'data_label': 'label_p',
            'data_label_n': 'label_n',
            'image_data_key': 'image_data',
            'batch_size': 100,
            'stream_size': 20,
            'image_dim': 227,
            'sentence_supervision': 'all',  # 'all' vs. 'last'
            'label_extract': 'CUB',
            'noun_phrase_list': 'data/noun_phrases_clean.txt',
        }
        for key, value in defaults.items():
            params.setdefault(key, value)
        #assert 'vector_file' in params.keys()
        data = textPreprocessor(params)
        # Pre-computed CUB image features (loaded from pickle).
        params['features'] = pkl.load(open(cub_features, 'r'))
        imageText_extractor = extractFeatureText(data, params, self.thread_result)
        # Labels are emitted once (stream of length 1), as vectors.
        params['stream_size'] = 1
        params['label_format'] = 'vector'
        label_extractor_vector = extractLabel(data, params, self.thread_result)
        params['extractors'] = [imageText_extractor, label_extractor_vector]
        self.data_extractors = [extractMulti(data, params, self.thread_result)]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-11-06 00:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: rewrite the choices of ``Sql.rol``."""

    dependencies = [
        ('adminapp', '0041_auto_20171105_0128'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sql',
            name='rol',
            # NOTE(review): every choice below is wrapped as a *group* whose
            # group label is the empty string — in Django this renders as
            # caption-less <optgroup> entries. Presumably unintentional;
            # confirm against the model definition before changing.
            field=models.CharField(choices=[('', (('estudiante', 'Estudiante'),)), ('', (('docente', 'Docente'),)), ('', (('administrativo', 'Administrativo'),)), ('', (('estudiante, docente', 'Estudiante, Docente'),)), ('', (('docente, administrativo', 'Docente, Administrativo'),)), ('', (('administrativo, docente ,estudiante', 'Administrativo, Docente, Estudiante'),)), ('', (('publico', 'Público'),))], max_length=50),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 10:00:02 2020
@author: Alex
"""
import numpy as np
# Load observed counts (first half) and unoscillated predictions (second half).
data = np.loadtxt('data.txt')
# Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int with floor division gives the same half-length index.
n = len(data) // 2
data1 = data[0:n]      # observed event counts per energy bin
data2 = data[n:2*n]    # unoscillated predicted counts per energy bin
# Energy bin centres: 200 bins of width 0.05 starting at 0.025.
data3 = np.array([0.025 + i * 0.05 for i in range(200)])
#s: θ23 =π/4 , ∆m =2.4 × 10−3 L = 295.
L = 295  # baseline in km
def prob(E, u, L=295):
    """Two-flavour muon-neutrino survival probability.

    E -- neutrino energy (scalar or numpy array).
    u -- parameters [theta_23, delta_m_squared].
    L -- baseline length (default 295, previously a hard-coded module
         global; parameterized so other baselines can be simulated).

    Returns 1 - sin^2(2*theta) * sin^2(1.267 * dm2 * L / E).
    """
    a = np.sin(2 * u[0]) ** 2
    b = np.sin((1.267 * u[1] * L) / E) ** 2
    return 1 - a * b
def λ_1(u):
    """Expected (oscillated) event count per energy bin: P(E_i) * prediction_i."""
    Pvals = prob(data3, u)
    return [data2[i] * Pvals[i] for i in range(200)]
def NLL_1(u):
    """Poisson negative log-likelihood of the observed counts (data1)
    given expected counts λ_1(u), up to a constant.

    Per bin: λ + k*(log(k/λ) - 1); for k == 0 the k*log(k/λ) term vanishes
    and the contribution is just λ.
    """
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    λ_ = λ_1(u)
    for i in range(0, 200):
        if data1[i] == 0:
            # k == 0 contributes only λ. Bug fix: the original condition
            # (λ != 0 and k == 0) sent the λ == 0, k == 0 case into the
            # else branch, producing log(0/0) -> nan.
            total += λ_[i]
        else:
            LL = λ_[i] + data1[i]*(np.log(data1[i]/λ_[i]) - 1)
            total += LL
    return total
def λ2(u):
    """Expected count per bin including a linear energy-scaling factor u[2]."""
    Pvals = prob(data3, u)
    return [u[2] * data3[i] * data2[i] * Pvals[i] for i in range(200)]
#Input to NLL in the form [θ, mass]
def NLL_2(u):
    """Poisson negative log-likelihood using the energy-scaled rates λ2(u).

    Input u is [theta, mass, scale]. Same per-bin formula as NLL_1.
    """
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    λ_ = λ2(u)
    for i in range(0, 200):
        if data1[i] == 0:
            # k == 0 contributes only λ. Bug fix: the original condition
            # (λ != 0 and k == 0) sent the λ == 0, k == 0 case into the
            # else branch, producing log(0/0) -> nan.
            total += λ_[i]
        else:
            LL = λ_[i] + data1[i]*(np.log(data1[i]/λ_[i]) - 1)
            total += LL
    return total
#coding:utf-8
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.bootstrap import Bootstrap
from flask.ext.login import LoginManager
from flask.ext.mail import Mail
from flask.ext.moment import Moment
# Flask extension singletons; each is bound to the app inside create_app().
db = SQLAlchemy()
bootstrap = Bootstrap()
login_manager = LoginManager()
mail = Mail()
moment = Moment()
# Unauthenticated users are redirected to the auth blueprint's login view.
login_manager.login_view='auth.login'
# 'strong' logs the user out if the client identity (IP/agent) changes.
login_manager.session_protection = 'strong'
# Flash message shown when login is required ("Please log in first").
login_manager.login_message = u'请先登录'
def create_app(config_name, jinja_environment):
    """Application factory: build and configure a Flask app.

    config_name       -- object/path passed to app.config.from_object().
    jinja_environment -- mapping of names to expose as Jinja globals.
    Returns the configured Flask application.
    """
    app = Flask(__name__)
    app.config.from_object(config_name)
    # Expose caller-supplied values to all templates.
    for name, value in jinja_environment.items():
        app.jinja_env.globals[name] = value
    # Bind every extension singleton to this app instance.
    for extension in (db, bootstrap, login_manager, mail, moment):
        extension.init_app(app)
    # Register blueprints (imported here to avoid circular imports).
    from .main_bp import main
    app.register_blueprint(main)
    from .auth_bp import auth
    app.register_blueprint(auth, url_prefix='/auth')
    from .home_bp import home
    app.register_blueprint(home, url_prefix='/home')
    from .manage_bp import manage
    app.register_blueprint(manage, url_prefix='/manage')
    from .blog_bp import blog
    app.register_blueprint(blog, url_prefix='/blog')
    return app
|
'''
Created on Jul 22, 2013
@author: christian
'''
from eelbrain import datasets, plot
def test_plot_topomap():
    "Test plot.Topomap"
    # Synthetic dataset containing a (case x sensor x time) NDVar 'utsnd'.
    ds = datasets.get_uts(utsnd=True)
    # Collapse the 0.075-0.125 s window to one value per sensor.
    topo = ds.eval('utsnd.summary(time=(0.075, 0.125))')
    p = plot.Topomap(topo, ds=ds, show=False)
    p.close()
    # Explicit color scale and plot width.
    p = plot.Topomap(topo, ds=ds, vmax=0.2, w=2, show=False)
    p.close()
    # One map per cell of the A x B interaction.
    p = plot.Topomap(topo, 'A%B', ds=ds, axw=2, show=False)
    p.close()
    # Without sensor labels.
    p = plot.Topomap(topo, ds=ds, sensorlabels=None, show=False)
    p.close()
    # MNE data
    ds = datasets.get_mne_sample(sub=[0, 1], sns=True)
    p = plot.Topomap(ds['sns'].summary(time=(.1, .12)), proj='left', show=False)
    p.close()
def test_plot_butterfly():
    "Test plot.TopoButterfly"
    # Synthetic dataset containing a (case x sensor x time) NDVar 'utsnd'.
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoButterfly('utsnd', ds=ds, show=False)
    # Move the linked topomap cursor to t = 0.2 s.
    p.set_topo_t(0.2)
    p.close()
    # Explicit color scale and plot width.
    p = plot.TopoButterfly('utsnd', ds=ds, vmax=0.2, w=2, show=False)
    p.close()
    # One plot per cell of the A x B interaction.
    p = plot.TopoButterfly('utsnd', 'A%B', ds=ds, axw=2, show=False)
    p.close()
    # Mark channels by index and by name.
    p = plot.TopoButterfly('utsnd', mark=[1, 2], ds=ds, show=False)
    p.close()
    p = plot.TopoButterfly('utsnd', mark=['1', '2'], ds=ds, show=False)
    p.close()
def test_plot_array():
    "Test plot.TopoArray"
    # Synthetic dataset containing a (case x sensor x time) NDVar 'utsnd'.
    ds = datasets.get_uts(utsnd=True)
    p = plot.TopoArray('utsnd', ds=ds, show=False)
    # Assign time point t = 0.2 s to topomap slot 0.
    p.set_topo_t(0, 0.2)
    p.close()
    # Explicit color scale and plot width.
    p = plot.TopoArray('utsnd', ds=ds, vmax=0.2, w=2, show=False)
    p.close()
    # One array per cell of the A x B interaction.
    p = plot.TopoArray('utsnd', 'A%B', ds=ds, axw=4, show=False)
    p.close()
|
import pymysql as sql
import os
class Beer:
    """Plain value object describing one beer."""

    def __init__(self, name, manufacturer, beer_type):
        self.name = name                  # beer name
        self.manufacturer = manufacturer  # producing brewery
        self.type = beer_type             # style/category (e.g. lager)
class BeersManager:
    """Minimal DAO for a `beers` table in MySQL.

    NOTE(review): the connection/cursor are created at class-definition time
    and shared by all instances; consider moving them into __init__.
    """
    db = sql.connect(host="localhost", user="testuser", passwd="password", db="python_course")
    cursor = db.cursor()

    def create_table(self):
        """Create the beers table (fails if it already exists)."""
        print('Creating table...')
        query = 'CREATE TABLE beers (beer_id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, ' \
                'beer_name TEXT, manufacturer TEXT, beer_type TEXT);'
        self.cursor.execute(query)

    def fill_table(self, beers):
        """Insert every Beer in *beers*.

        Bug fixes: values are now passed as query parameters instead of
        string concatenation (SQL injection / broken quoting), and the
        transaction is committed (pymysql does not autocommit by default,
        so the original inserts were silently discarded).
        """
        print('Inserting data to table...')
        query = 'INSERT INTO beers(beer_name, manufacturer, beer_type) VALUES(%s, %s, %s);'
        for b in beers:
            self.cursor.execute(query, (b.name, b.manufacturer, b.type))
        self.db.commit()

    def delete_beer(self, beer_id):
        """Delete one row by primary key (parameterized + committed)."""
        print('Deleting beer id: ' + str(beer_id) + '...')
        self.cursor.execute('DELETE FROM beers WHERE beer_id = %s;', (beer_id,))
        self.db.commit()

    def delete_all(self):
        """Remove every row (TRUNCATE is DDL and commits implicitly)."""
        print('Deleting all beers...')
        self.cursor.execute('TRUNCATE beers;')

    def destroy_table(self):
        """Drop the table entirely."""
        print('Destroying table beers...')
        self.cursor.execute('DROP TABLE beers;')

    def print_beers(self):
        """Print every row as a tuple."""
        self.cursor.execute('SELECT * FROM beers;')
        for r in self.cursor.fetchall():
            print(r)

    def close(self):
        """Close the shared connection."""
        self.db.close()
# Interactive console menu driving BeersManager.
manager = BeersManager()
beers = []   # beers staged in memory until option 3 writes them to the DB
cmd = 99     # sentinel so the loop body runs at least once
while cmd != 0:
    print('1.Create table\n'
          '2.Create a beer\n'
          '3.Fill table\n'
          '4.Print beers\n'
          '5.Delete beer\n'
          '6.Delete all beers\n'
          '7.Destroy table\n'
          '8.Clear screen\n'
          '0.Exit\n')
    cmd = int(input())
    if cmd == 1:
        manager.create_table()
    if cmd == 2:
        # Collect the three fields and stage a new Beer locally.
        print('New beer')
        print('Name: ')
        n = input()
        print('Manufacturer: ')
        m = input()
        print('Beer type: ')
        t = input()
        beer = Beer(n, m, t)
        beers.append(beer)
    if cmd == 3:
        manager.fill_table(beers)
    if cmd == 4:
        manager.print_beers()
    if cmd == 5:
        print('Delete beer')
        print('ID: ')
        beer_id = int(input())
        manager.delete_beer(beer_id)
    if cmd == 6:
        manager.delete_all()
    if cmd == 7:
        manager.destroy_table()
    if cmd == 8:
        # Windows-only clear; on POSIX this would be 'clear'.
        os.system('cls')
# NOTE(review): manager.close() is never called; the connection is left to
# be reclaimed at interpreter exit.
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 23:09:30 2021
@author: anand
"""
# importing necessary packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
# Loading the data file
data_load = pd.read_csv('loan_data.csv', sep = ',', header = 0)
data = data_load.copy(deep = True)
# Encoding the categorical 'purpose' column by Find and Replace
cleanupObjects = {'purpose' : {'debt_consolidation' : 0, 'all_other' : 1,'credit_card' : 2,'home_improvement' : 3,'small_business' : 4,
'major_purchase' : 5,'educational' : 6}}
data = data.replace(cleanupObjects)
# Splitting into Independent (all but first and last columns) and
# Dependent (last column) variables
X = data.iloc[:, 1:-1].values
y = data.iloc[:, -1].values
# Splitting the Dataset into Train (70%) and Test (30%) data
XTrain, XTest, yTrain, yTest = train_test_split(X, y, test_size = 0.30, random_state = 0)
# Creating a model instance and training the model
clfEntropy = DecisionTreeClassifier(criterion = 'entropy', random_state = 0, max_depth = 3, min_samples_leaf = 14)
clfEntropy.fit(XTrain, yTrain)
# Predicting the output on the held-out test set
yPred = clfEntropy.predict(XTest)
# Checking the accuracy score (as a percentage)
accuracy = accuracy_score(yTest, yPred) * 100
print(f"The model is working with accuracy of {round(accuracy,2)} %")
|
# Pythagorean triples (x, y, z) with 0 <= x < y < z < 100 and x^2 + y^2 = z^2.
values = [
    {"x": x, "y": y, "z": z}
    for x in range(100)
    for y in range(100)
    for z in range(100)
    if x < y < z and x**2 + y**2 == z**2
]
for triple in values:
    print(triple)  # every combination
print(len(values))  # how many there are
print(f"最小のペア:{values[0]}")   # pair with the smallest members
print(f"最大のペア{values[-1]}")  # pair with the largest members
#
|
import cv2
import numpy as np
import os
'''
Requisiti: Python, Pip installati
pip3 install virtualenv
virtualenv --python=python3 env
se errore: cannot be loaded because running scripts is disabled on this system
Aprire powersheel come amministratore e inserire
Set-ExecutionPolicy RemoteSigned
'''
# Preview the first mask image: load, resize to 64x64, flatten, restore shape.
path_mask = os.path.join("dataset","mask")
names_mask = os.listdir(path_mask)
for name in names_mask:
    path = os.path.join(path_mask,name)
    im = cv2.imread(path)
    #im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    #im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    #im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    size = 64     # target side length in pixels
    channel = 3   # colour channels (BGR)
    cv2.imshow("kl",im)
    im = cv2.resize(im,(size,size)) # columns, rows
    # Flatten to a single feature vector, print its shape, then restore.
    im = im.reshape(size*size*channel)
    print(im.shape)
    im = im.reshape(size,size,channel)
    #im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    cv2.imshow("klk",im) # only works in google colab: cv2.imshow("window",im)
    cv2.waitKey(0)
    break  # preview only the first file
|
import sys
sys.path.append('./ActivationFunction')
import numpy as np
import sigmoid as sig
import softmax as sofm
# 三層ニューラルネットワークのクラス
class Three_Layer_Neural_Network:
    """Simple feed-forward network evaluated layer by layer.

    Holds the current layer input together with the weight matrix and bias
    vector used by the next affine step; activations overwrite `input`.
    """

    # Class-level placeholder values (overwritten by __init__).
    input = np.array([1.0, 0.5])
    weight = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
    bias = np.array([0.1, 0.2, 0.3])

    def __init__(self, input1, weight1, bias1):
        """Store the initial input, first-layer weights and bias."""
        self.input = input1
        self.weight = weight1
        self.bias = bias1

    def set_weight_and_bias(self, weightx, biasx):
        """Install the weight matrix and bias vector for the next layer."""
        self.weight = weightx
        self.bias = biasx

    def input_to_layer_by_sigmoid(self):
        """Affine transform followed by the sigmoid activation."""
        self.input = sig.sigmoid(np.dot(self.input, self.weight) + self.bias)

    def input_to_layer_by_identify(self):
        """Affine transform with the identity activation (output layer)."""
        self.input = np.dot(self.input, self.weight) + self.bias

    def print_result(self):
        """Dump the current input, weights and bias."""
        print(self.input)
        print(self.weight)
        print(self.bias)
# Usage example: evaluate a 2-3-2-2 network layer by layer.
input1 = np.array([1.0, 0.5])
weight1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
bias1 = np.array([0.1, 0.2, 0.3])
threeLayerNeuralNetwork = Three_Layer_Neural_Network(input1, weight1, bias1)
threeLayerNeuralNetwork.print_result()
# Hidden layer 1: affine transform + sigmoid.
threeLayerNeuralNetwork.input_to_layer_by_sigmoid()
threeLayerNeuralNetwork.print_result()
weight2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
bias2 = np.array([0.1, 0.2])
threeLayerNeuralNetwork.set_weight_and_bias(weight2, bias2)
# Hidden layer 2: affine transform + sigmoid.
threeLayerNeuralNetwork.input_to_layer_by_sigmoid()
threeLayerNeuralNetwork.print_result()
weight3 = np.array([[0.1, 0.3], [0.2, 0.4]])
bias3 = np.array([0.1, 0.2])
threeLayerNeuralNetwork.set_weight_and_bias(weight3, bias3)
# Output layer: affine transform with identity activation.
threeLayerNeuralNetwork.input_to_layer_by_identify()
threeLayerNeuralNetwork.print_result()
|
import requests
from lxml import etree
import json
from bs4 import BeautifulSoup
# Attachment download endpoint on the internal OA system.
url = 'http://oa.bears.com.cn:27292/sys/attachment/sys_att_main/sysAttMain.do?method=download&fdId=1744714b4a6ac7fe1da661c4b8f859f2'
print(url)
# url = 'http://oa.bears.com.cn:27292/km/institution/?categoryId=15dbf410dca548412ab64024e8bb4521#cri.q=docStatus:30'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'}
# Session/auth cookies copied from a logged-in browser session.
cookies = {'Cookie': 'j_lang=zh-CN; JSESSIONID=7A412515B0D214E6902E93BB1462566F; LtpaToken=AAECAzVGNjBDQTg5NUY2MTczNDl6'
                     'aGFvbGbsLr33qQQxukkOoIxIV0urDDvxxg=='}
response = requests.get(url,headers=headers,cookies=cookies)
# NOTE(review): both iterations write the SAME response body, so 1.pdf and
# 2.pdf are identical copies — looks like placeholder code; confirm intent.
frie_name = [1,2]
for a in frie_name:
    with open('%s.pdf'%a,'wb') as f:
        f.write(response.content)
print(response)
# code = etree.HTML(response)
#
# info = code.xpath("//div")
#
# #//div[@class='clearfloat lui_listview_rowtable_summary_content_box']/dl[2]/dt/a/@href
# print(info)
#------
# Download link: http://oa.bears.com.cn:27292/sys/attachment/sys_att_main/sysAttMain.do?method=download&fdId=16a04bef01c896732066e084b8fa5385
# -*- coding:utf-8 -*-
# author: will
import sys
import importlib
importlib.reload(sys)
import logging
import os
import re
import time
from logging import Formatter
from logging.handlers import TimedRotatingFileHandler
from utils.file_lock import FileLock
class Logging(object):
    """Logging helpers: a formatter that renders non-ASCII (e.g. Chinese)
    text readably, and a timed rotating file handler that is safe to use
    from multiple processes."""

    _default_format_template = "%(asctime)s [%(levelname)s] [API/%(funcName)s] [%(filename)s:%(lineno)d] %(message)s"

    logger = logging.getLogger(__name__)

    class ReadEasyFormatter(Formatter):
        """Show CJK characters in log output instead of \\u escape sequences."""

        def format(self, record):
            """
            Format the specified record as text.

            Identical to logging.Formatter.format(), except that at the end
            any escaped non-ASCII sequences (u'\\u....') are decoded so the
            original characters appear in the log.
            """
            record.message = record.getMessage()
            if self.usesTime():
                record.asctime = self.formatTime(record, self.datefmt)
            try:
                s = self._fmt % record.__dict__
            except UnicodeDecodeError as e:
                # Issue 25664. The logger name may be Unicode. Try again ...
                try:
                    record.name = record.name.decode('utf-8')
                    s = self._fmt % record.__dict__
                except UnicodeDecodeError:
                    raise e
            if record.exc_info:
                # Cache the traceback text to avoid converting it multiple times
                # (it's constant anyway)
                if not record.exc_text:
                    record.exc_text = self.formatException(record.exc_info)
            if record.exc_text:
                if s[-1:] != "\n":
                    s = s + "\n"
                try:
                    s = s + record.exc_text
                except UnicodeError:
                    # Python 2 remnant: on Python 3 str has no .decode, but
                    # this branch is only reached when concatenation raised
                    # UnicodeError (mixed str/bytes). See issue 8924/13232.
                    # Bug fix: getfilesystemencoding() lives on `sys`, not
                    # `sys.path` — the original raised AttributeError here.
                    s = s + record.exc_text.decode(sys.getfilesystemencoding(),
                                                   'replace')
            if re.findall(r"u'\\u", s):
                s = s.encode('utf-8').decode('unicode_escape')
            return s

    class SafeTimedRotatingFileHandler(TimedRotatingFileHandler):
        """TimedRotatingFileHandler whose rollover is safe when several
        processes write to the same log file."""

        def doRollover(self):
            """
            Do a rollover; a date/time stamp is appended to the filename when
            the rollover happens, named for the start of the interval rather
            than the current time. A file lock ensures only one process
            performs the rename.
            """
            if self.stream:
                self.stream.close()
                self.stream = None
            # get the time that this sequence started at and make it a TimeTuple
            currentTime = int(time.time())
            dstNow = time.localtime(currentTime)[-1]
            t = self.rolloverAt - self.interval
            if self.utc:
                timeTuple = time.gmtime(t)
            else:
                timeTuple = time.localtime(t)
                dstThen = timeTuple[-1]
                if dstNow != dstThen:
                    if dstNow:
                        addend = 3600
                    else:
                        addend = -3600
                    timeTuple = time.localtime(t + addend)
            dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
            if not os.path.exists(dfn) and os.path.exists(self.baseFilename):
                with FileLock(self.baseFilename):
                    # Re-check under the lock so that concurrent processes do
                    # not both try to rename the file.
                    if not os.path.exists(dfn) and os.path.exists(self.baseFilename):
                        os.rename(self.baseFilename, dfn)
            if self.backupCount > 0:
                for s in self.getFilesToDelete():
                    os.remove(s)
            if not self.delay:
                self.stream = self._open()
            newRolloverAt = self.computeRollover(currentTime)
            while newRolloverAt <= currentTime:
                newRolloverAt = newRolloverAt + self.interval
            # If DST changes and midnight or weekly rollover, adjust for this.
            if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
                dstAtRollover = time.localtime(newRolloverAt)[-1]
                if dstNow != dstAtRollover:
                    if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                        addend = -3600
                    else:           # DST bows out before next rollover, so we need to add an hour
                        addend = 3600
                    newRolloverAt += addend
            self.rolloverAt = newRolloverAt

    @classmethod
    def addTimedRotatingFileHandler(cls, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False,
                                    utc=False, fmt=None):
        """Attach a SafeTimedRotatingFileHandler (using ReadEasyFormatter)
        to the class logger. *fmt* falls back to _default_format_template."""
        handler = cls.SafeTimedRotatingFileHandler(filename, when, interval, backupCount, encoding, delay, utc)
        format_template = fmt or cls._default_format_template
        log_format = cls.ReadEasyFormatter(fmt=format_template)
        handler.setFormatter(log_format)
        cls.logger.addHandler(handler)
|
'''
Дано положительное двузначное число.
Найдите число десятков в нем.
Примеры:
Тест 1
Входные данные:
42
Вывод программы:
4
''' |
from django.shortcuts import render,redirect
from .models import Member,TempMember,Admin
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse,HttpResponseBadRequest
def homepage(request):
    """Render the club landing page."""
    return render(request,"MathClubVitap/index.html")
def signup(request):
    """Handle member sign-up.

    GET renders the empty form; POST validates the submission and creates a
    TempMember awaiting administrator approval. Always renders the signup
    template with `success` / `warning` context messages.
    """
    success = {'title': '', 'message': ''}
    warnings = {'title': '', 'message': ''}
    if request.method == 'POST':
        name = request.POST["name"]
        # Only institutional addresses: the domain is appended server-side.
        email = (request.POST["email"] + "@vitap.ac.in").lower()
        reg = request.POST["reg"].upper()
        password = request.POST["password"]
        confirmpassword = request.POST["confirmpassword"]
        try:
            user = Member.objects.get(username=email)
        except Exception:
            user = None
        try:
            user1 = TempMember.objects.get(username=email)
        except Exception:
            user1 = None
        if user is None:
            if user1 is None:
                if password == confirmpassword:
                    # SECURITY NOTE(review): the password is persisted in
                    # plain text on TempMember; it should be hashed (e.g.
                    # django.contrib.auth.hashers.make_password) first.
                    user = TempMember.objects.create(username=email, password=password, name=name, regno=reg)
                    user.save()
                    # Bug fix: "successfuly" -> "successfully" in the user-facing message.
                    success['title'] = "Account created successfully! "
                    success['message'] = "Waiting for administrator to process your request!"
                else:
                    warnings['title'] = "Error: "
                    warnings['message'] = "Password didn't match!"
            else:
                # A pending TempMember request already exists for this email.
                success['title'] = "Thank you for your patience: "
                success['message'] = "Your request is under progress!"
        else:
            warnings['title'] = "Error: "
            # Bug fix: "exits" -> "exists" in the user-facing message.
            warnings['message'] = "User already exists"
    return render(request, 'MathClubVitap/Signup.html', {'success': success, 'warning': warnings})
def about(request):
    """Render the static About page."""
    return render(request,'MathClubVitap/about.html')
def events(request):
    """Render the static Events page."""
    return render(request,'MathClubVitap/event.html')
|
from datetime import datetime
def next_month(sourcedate):
    """Return midnight on the first day of the month after *sourcedate*."""
    year, month = sourcedate.year, sourcedate.month + 1
    if month > 12:  # December rolls over into January of the next year
        year, month = year + 1, 1
    return datetime(year, month, 1)
|
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def simulate(start_date, end_date, equities, allocations):
    """Simulate a fixed-allocation portfolio over NYSE trading days.

    :param start_date: datetime object
    :param end_date: datetime object
    :param equities: list of stock symbols (strings)
    :param allocations: list of percent allocation for each stock (floats, must add to 1.0)
    :return: (std dev of daily returns, average daily return, Sharpe ratio, cumulative return)
    """
    # Closing prices are stamped at 16:00.
    close_time = dt.timedelta(hours=16)
    # Trading days between start and end dates.
    trading_days = du.getNYSEdays(start_date, end_date, close_time)
    # Cached Yahoo data source; only the 'close' series is needed.
    data_source = da.DataAccess('Yahoo', cachestalltime=0)
    keys = ['close']
    raw_frames = data_source.get_data(trading_days, equities, keys)
    frames = dict(zip(keys, raw_frames))
    prices = frames['close'].values
    # Normalise each series to its first day, then weight by allocation.
    normalized = prices / prices[0, :]
    weighted = normalized * allocations
    # Daily portfolio value is the row-wise sum of the weighted series.
    portfolio_values = weighted.copy().sum(axis=1)
    # Convert values to daily returns in place.
    daily_returns = portfolio_values.copy()
    tsu.returnize0(daily_returns)
    volatility = np.std(daily_returns)
    mean_return = np.mean(daily_returns)
    # Sharpe ratio scaled by sqrt(number of trading days), risk-free rate 0.
    num_days = len(daily_returns)
    sharpe_ratio = np.sqrt(num_days) * (mean_return / volatility)
    # cum(t) = cum(t-1) * (1 + ret(t)), starting from 1.0.
    cumulative = np.zeros(num_days)
    cumulative[0] = 1.0
    for day in np.arange(1, num_days, 1):
        cumulative[day] = cumulative[day - 1] * (1 + daily_returns[day])
    return volatility, mean_return, sharpe_ratio, cumulative[num_days - 1]
if __name__ == "__main__":
#Test function
start_date = dt.datetime(2011, 1, 1)
end_date = dt.datetime(2011, 12, 31)
equities = ['AAPL', 'GLD', 'GOOG', 'XOM']
allocations = [0.4, 0.4, 0.0, 0.2]
simulate(start_date, end_date, equities, allocations)
optimum_allocation = []
best_sharpe = 0.0
best_cum = 0.0
best_avg_daily_ret = 0.0
for a1 in np.arange(0.0, 1.0, 0.1):
for a2 in np.arange(0.0, 1.0, 0.1):
for a3 in np.arange(0.0, 1.0, 0.1):
for a4 in np.arange(0.0, 1.0, 0.1):
if a1 + a2 + a3 + a4 == 1.0:
alloc = [a1, a2, a3, a4]
vol, avg_daily_ret, sharpe, cum_ret = simulate(start_date, end_date, equities, alloc)
if sharpe > best_sharpe:
best_sharpe = sharpe
best_cum = cum_ret
best_avg_daily_ret = avg_daily_ret
optimum_allocation = alloc
print "Start Date: ", start_date
print "End Date: ", end_date
print "Symbols: ", equities
print "Optimum Allocations: ", optimum_allocation
print "Sharpe Ratio: ", best_sharpe
print "Volatility (stddev of daily returns): ", vol
print "Average Daily Return: ", best_avg_daily_ret
print "Cumulative Return: ", best_cum
|
from ke2mongo.lib.mongo import mongo_client_db
#!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Ben Scott on 2013-08-23.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
def main():
    """Count, per DarWin-Core field, how many ecatalogue documents define it.

    Connects to MongoDB and, for each field name below, runs an $exists
    query against the `ecatalogue` collection, printing `field:\tcount`.
    """
    # Setup MongoDB
    mongo_db = mongo_client_db()
    # DarWin-Core field names to check for presence.
    fields = [
        'DarLocality',
        'DarVerbatimElevation',
        'DarInfraspecificRank',
        'DarDayIdentified',
        'DarMinimumDepthInMeters',
        'DarMonthIdentified',
        'DarMaximumDepthInMeters',
        'DarIndividualCount',
        'DarMaximumDepth',
        'DarVerbatimCollectingDate',
        'DarTissues',
        'DarScientificNameAuthorYear',
        'DarVerbatimLongitude',
        'DarNotes',
        'DarCollectorNumber',
        'DarGenBankNum',
        'DarIdentificationModifier',
        'DarMinimumDepth',
        'DarLatLongComments',
        'DarIsland',
        'DarPreviousCatalogNumber',
        'DarEndTimeOfDay',
        'DarYearCollected',
        'DarVerbatimDepth',
        'DarCatalogNumber',
        'DarOriginalCoordinateSystem',
        'DarScientificNameAuthor',
        'DarOtherCatalogNumbers',
        'DarSubgenus',
        'DarFieldNumber',
        'DarYearIdentified',
        'DarRelationshipType',
        'DarEndMonthCollected',
        'DarInfraspecificEpithet',
        'DarAgeClass',
        'DarRemarks',
        'DarGeodeticDatum',
        'DarKingdom',
        'DarStart_EndCoordinatePrecision',
        'DarCoordinatePrecision',
        'DarStartTimeOfDay',
        'DarSpecificEpithet',
        'DarDecimalLongitude',
        'DarLatitude',
        'DarCitation',
        'DarLifeStage',
        'DarFamily',
        'DarStartYearCollected',
        'DarEndLatitude',
        'DarBasisOfRecord',
        'DarMaximumElevation',
        'DarStartLatitude',
        'DarCounty',
        'DarRelatedInformation',
        'DarObservedIndividualCount',
        'DarSource',
        'DarRecordURL',
        'DarIslandGroup',
        'DarWaterBody',
        'DarCoordinateUncertaintyInMeter',
        'DarSex',
        'DarStartDayCollected',
        'DarVerbatimLatitude',
        'DarGenus',
        'DarTimeOfDay',
        'DarImageURL',
        'DarDecimalLatitude',
        'DarTypeStatus',
        'DarStateProvince',
        'DarBoundingBox',
        'DarGeorefMethod',
        'DarScientificName',
        'DarCollectionCode',
        'DarLongitude',
        'DarGlobalUniqueIdentifier',
        'DarInstitutionCode',
        'DarRelatedCatalogItem',
        'DarTimeCollected',
        'DarPreparations',
        'DarContinent',
        'DarEndJulianDay',
        'DarGMLFeature',
        'DarCountry',
        'DarJulianDay',
        'DarSubspecies',
        'DarFieldNotes',
        'DarMaximumElevationInMeters',
        'DarContinentOcean',
        'DarIdentificationQualifier',
        'DarTimeZone',
        'DarEndLongitude',
        'DarHorizontalDatum',
        'DarClass',
        'DarRelatedCatalogItems',
        'DarPhylum',
        'DarStartMonthCollected',
        'DarHigherGeography',
        'DarDepthRange',
        'DarDateLastModified',
        'DarCollector',
        'DarObservedWeight',
        'DarMinimumElevationInMeters',
        'DarHigherTaxon',
        'DarStartJulianDay',
        'DarDayCollected',
        'DarTemperature',
        'DarEndDayCollected',
        'DarStartLongitude',
        'DarCatalogNumberNumeric',
        'DarOrder',
        'DarMinimumElevation',
        'DarPreparationType',
        'DarEndYearCollected',
        'DarMonthCollected',
        'DarIdentifiedBy',
        'DarCatalogNumberText',
        'DarSpecies'
    ]
    for field in fields:
        # Count documents where the field is present (Python 2 print).
        results = mongo_db.ecatalogue.find({field: {'$exists': 1}})
        print '{0}:\t{1}\r'.format(field, results.count())

if __name__ == '__main__':
    main()
|
import pandas as pd
import numpy as np
from math import isnan
FILE_FULL = './data/training_set_VU_DM_2014.csv'

if __name__ == '__main__':
    # Replace the per-row orig_destination_distance with per-search mean/std
    # columns, imputing from similar trips in the full data set when a search
    # has no distance values at all.
    print('--- Starting Data Cleanup ---')
    print('Loading Full Dataset...')
    data_full = pd.read_csv(FILE_FULL)
    print('Loaded')
    for i in range(0, 20):
        print('Cleaning File {}'.format(i))
        data = pd.read_csv('./data/train_split_cleaned2/train_data_{:02d}.csv'.format(i))
        data.drop(columns=['Unnamed: 0'], inplace=True)
        # Bug fix: the original initialised 'avg_orig_dist'/'std_orig_dist'
        # but wrote to 'avg_orig_dest_dist'/'std_orig_dest_dist' below,
        # leaving the initialised columns permanently zero in the output.
        data['avg_orig_dest_dist'] = 0
        data['std_orig_dest_dist'] = 0
        s_ids = data['srch_id'].unique()
        for j, s_id in enumerate(s_ids):
            # Work on the block of rows belonging to one search.
            search = data.loc[data['srch_id'] == s_id]
            s_idx = search.index
            num_nulls = search['orig_destination_distance'].isnull().values.sum()
            if num_nulls == len(search):
                # No distance for this search at all: impute from similar
                # trips (same origin country AND same destination id or
                # property country).
                origin = search['visitor_location_country_id'].iloc[0]
                dest_id = search['srch_destination_id'].iloc[0]
                prop_country = search['prop_country_id'].iloc[0]
                # Bug fix: the original parenthesization compared the whole
                # boolean expression to prop_country —
                # `(... | (df['prop_country_id'])) == prop_country` — instead
                # of comparing the prop_country_id column itself.
                same_trip = data_full.loc[
                    (data_full['visitor_location_country_id'] == origin)
                    & ((data_full['srch_destination_id'] == dest_id)
                       | (data_full['prop_country_id'] == prop_country))]
                avg_odd = same_trip['orig_destination_distance'].mean()
                # Can still be nan; fall back to average distance to this destination.
                if isnan(avg_odd):
                    same_trip = data_full.loc[data_full['srch_destination_id'] == dest_id]
                    avg_odd = same_trip['orig_destination_distance'].mean()
                    std_odd = same_trip['orig_destination_distance'].std()
                    if isnan(avg_odd):
                        same_trip = data_full.loc[data_full['prop_country_id'] == prop_country]
                        avg_odd = same_trip['orig_destination_distance'].mean()
                        std_odd = same_trip['orig_destination_distance'].std()
                        if isnan(avg_odd):
                            # NOTE(review): std_odd may hold a stale/undefined
                            # value in this branch; the row gets nan averages.
                            print('Dest_id {} has no distance data at all!'.format(dest_id))
                else:
                    std_odd = same_trip['orig_destination_distance'].std()
            else:
                avg_odd = search['orig_destination_distance'].mean()
                std_odd = search['orig_destination_distance'].std()
            data.loc[s_idx, 'avg_orig_dest_dist'] = avg_odd
            data.loc[s_idx, 'std_orig_dest_dist'] = std_odd
        data.drop(columns=['orig_destination_distance'], inplace=True)
        data.to_csv('./data/train_split_cleaned3/train_data_{:02d}.csv'.format(i))
        print('Set {} saved'.format(i))
    print('--- DONE ---')
|
# usr/bin/env python
#-*- coding:utf-8 -*-
# Build a dict from two parallel lists, then sort its items by value.
key = ['2','1','3','4','5']
value = ['bb','aa','cc','dd','ee']
dic = {k: v for k, v in zip(key, value)}
# sorted() keeps the (key, value) tuples, ordered by the value part.
sort_data = sorted(dic.items(), key=lambda item: item[1])
print(sort_data)
|
from tkinter import *
from tkinter import ttk
_pasos = 80        # side of one board square, in pixels
_dimension = 640   # 8 x 8 squares of 80 px each
damas = Tk()
damas.title("DAMAS CRACK, MAQUINA, LEYENDA")
tablero = Canvas(damas, width=_dimension, height=_dimension)
damas.geometry('640x640')
tablero.place(x=0, y=0)
# Piece sprites: blue and yellow checkers.
fichazul = PhotoImage(file='ficha1.png')
fichamarilla = PhotoImage(file='ficha2.png')
turno= "j1"   # whose turn it is: "j1" or "j2"
nombreJugador1 = "Jonathan"
nombreJugador2 = "Chantal"
# Label that displays whose turn it is (updated by turnoj()).
turnoJugadorLabel=ttk.Label(damas,text="")
turnoJugadorLabel.place(x="625", y="50")
def turnoj():
    """Refresh the turn label from the global `turno` state.

    Bug fix: the original immediately rebound the *local* name `turno` to
    the label prefix, shadowing the global turn flag, so the comparison was
    always False and the label always showed player 2's name.
    """
    texto = "Turno de: "
    if turno == "j1":
        texto += nombreJugador1
    else:
        texto += nombreJugador2
    turnoJugadorLabel.config(text=texto)
_lista = []            # every canvas object created for the board
# Position lookup: "x-y" -> piece id, and str(piece id) -> {"px","py"}.
_posicionar = {}
_index = 0             # row counter; drives square colour offset and piece rows
_datos_act = {"item":None,"px":0,"py":0}   # state of the piece being dragged
_posazul = [];         # presumably blue piece positions — unused; confirm
_posamarillo = [];     # presumably yellow piece positions — unused; confirm
# White background for the whole board.
obj = tablero.create_rectangle(0,0,_dimension,_dimension, outline="blue", fill="white")
for i in range(8):
    for j in range(8):
        pi = i*_pasos
        pj = j*_pasos
        if _index % 2 == 0:
            if j % 2==0:
                if _index<=2:
                    # Rows 0-2, playable squares: yellow pieces.
                    obj = tablero.create_image(pj,pi, anchor = NW, image=fichamarilla,tags=("amarilla","ficha"))
                    _posicionar[str(pj)+"-"+str(pi)] = obj
                    _posicionar[str(obj)] = {"px":pj,"py":pi}
                else:
                    if _index<=4:
                        # Rows 3-4: empty middle of the board.
                        pass
                    else:
                        # Rows 5-7, playable squares: blue pieces.
                        obj = tablero.create_image(pj,pi, anchor = NW, image=fichazul,tags=("azul","ficha"))
                        _posicionar[str(pj)+"-"+str(pi)] = obj
                        _posicionar[str(obj)] = {"px":pj,"py":pi}
            else:
                # Non-playable square: black fill.
                obj = tablero.create_rectangle(pj,pi,pj+_pasos,pi+_pasos, outline="black", fill="black")
        else:
            # Odd rows: playable and black squares are swapped.
            if j % 2!=0:
                if _index<=2:
                    obj = tablero.create_image(pj,pi, anchor = NW, image=fichamarilla,tags=("amarilla","ficha"))
                    _posicionar[str(pj)+"-"+str(pi)] = obj
                    _posicionar[str(obj)] = {"px":pj,"py":pi}
                else:
                    if _index<=4:
                        pass
                    else:
                        obj = tablero.create_image(pj,pi, anchor = NW, image=fichazul,tags=("azul","ficha"))
                        _posicionar[str(pj)+"-"+str(pi)] = obj
                        _posicionar[str(obj)] = {"px":pj,"py":pi}
            else:
                obj = tablero.create_rectangle(pj,pi,pj+_pasos,pi+_pasos, outline="black", fill="black")
        _lista.append(obj)
    _index+=1
# Debug dump of the position lookup table.
for _key in _posicionar:
    print(_key,_posicionar[_key])
sign = lambda x: (1, -1)[x < 0]
def buttonClick(event):
    # Placeholder: plain clicks are fully handled by press/release/motion.
    pass
def buttonPress(event):
    """Start of a drag: remember which piece (if any) is under the cursor.

    Stores the press position, its square-aligned ("floor") coordinates and
    the offset between the cursor and the piece's stored origin, so the piece
    can be snapped into place or returned on release.
    """
    global _datos_act
    _item = tablero.find_closest(event.x, event.y)[0]
    _tags = tablero.gettags(_item)
    if "ficha" in _tags:
        _item_key = str(_item)
        _val = _posicionar.get(_item_key,None)
        if _val is not None:
            _datos_act["item"] = _item
            # Snap the press point to the top-left corner of its 80px square.
            floorX = event.x - (event.x % _pasos)
            floorY = event.y - (event.y % _pasos)
            _datos_act["px"] = event.x
            _datos_act["py"] = event.y
            _datos_act["fpx"] = floorX
            _datos_act["fpy"] = floorY
            _datos_act["relativeOffsetX"] = event.x-_val["px"]
            _datos_act["relativeOffsetY"] = event.y-_val["py"]
        else:
            pass
    else:
        # Pressed on something that is not a piece: nothing to drag.
        _datos_act["item"] = None
def buttonRelease(event):
    """End of a drag: validate the move and snap the piece, or send it back.

    A move is accepted only onto a dark square, diagonally, at most two
    squares away; a two-square jump must land behind exactly one opposing
    piece, which is then captured.  Invalid moves return the piece to its
    previous square.
    """
    global _posicionar
    global _datos_act
    global tablero
    # BUG FIX: `turno` is assigned in several branches below; without this
    # declaration Python treated it as a local variable, so reading it in
    # `if turno=="j1"` raised UnboundLocalError on the first move.
    global turno
    _item = _datos_act["item"]
    if _item is None:
        return
    _px = _datos_act["px"]
    _py = _datos_act["py"]
    _fpx = _datos_act["fpx"]
    _fpy = _datos_act["fpy"]
    _item_key = str(_item)
    _last_pos = _posicionar.get(_item_key,None)
    _lpx = _last_pos["px"]
    _lpy = _last_pos["py"]
    _tags = tablero.gettags(_item)
    _items = tablero.find_overlapping(event.x, event.y,event.x, event.y)
    print(_items)
    # Canvas item 1 is the white background rectangle; finding it second in
    # the overlap list means the drop landed on a light (illegal) square.
    if _items[1]!=1:
        floorX = event.x - (event.x % _pasos)
        floorY = event.y - (event.y % _pasos)
        _key = str(floorX)+"-"+str(floorY)
        deltaFloorX = floorX - _fpx
        deltaFloorY = floorY - _fpy
        abs_deltaFloorX = abs(deltaFloorX)
        abs_deltaFloorY = abs(deltaFloorY)
        if(
            deltaFloorX==0 or
            deltaFloorY==0 or
            abs_deltaFloorX!=abs_deltaFloorY or
            abs_deltaFloorX>160
        ):
            # Not a diagonal move of 1-2 squares: undo the drag.
            # NOTE(review): toggling the turn after an *invalid* move looks
            # wrong, but is preserved here -- confirm the intended rule.
            _deltaX = _lpx - _px + _datos_act["relativeOffsetX"]
            _deltaY = _lpy - _py + _datos_act["relativeOffsetY"]
            tablero.move(_item,_deltaX,_deltaY)
            if turno=="j1":
                turno="j2"
            else:
                turno="j1"
            turnoj()
        else:
            if abs_deltaFloorX>80:
                # Two-square jump: the square between origin and destination
                # must hold an opposing piece, and the destination must be free.
                tfloorX = floorX - sign(deltaFloorX) * _pasos
                tfloorY = floorY - sign(deltaFloorY) * _pasos
                _tkey = str(tfloorX)+"-"+str(tfloorY)
                _t1key = str(floorX)+"-"+str(floorY)
                _val = _posicionar.get(_tkey,None)
                _val1 = _posicionar.get(_t1key,None)
                if _val is None:
                    # Nothing to jump over: undo.
                    _deltaX = _lpx - _px + _datos_act["relativeOffsetX"]
                    _deltaY = _lpy - _py + _datos_act["relativeOffsetY"]
                    tablero.move(_item,_deltaX,_deltaY)
                    if turno=="j1":
                        turno="j2"
                    else:
                        turno="j1"
                    turnoj()
                else:
                    if _val1 is None:
                        _otags = tablero.gettags(_val)
                        if "ficha" in _otags:
                            if ("amarilla" in _tags and "azul" in _otags) or ("amarilla" in _otags and "azul" in _tags):
                                # Legal capture: snap the piece, update both
                                # position maps and delete the captured piece.
                                _deltaX = floorX -_px + _datos_act["relativeOffsetX"]
                                _deltaY = floorY - _py + _datos_act["relativeOffsetY"]
                                tablero.move(_item,_deltaX,_deltaY)
                                _posicionar[_key] = _item
                                _last_item_key = str(_posicionar[_item_key]["px"])+"-"+str(_posicionar[_item_key]["py"])
                                _posicionar[_last_item_key] = None
                                _posicionar[_item_key]["px"]=floorX
                                _posicionar[_item_key]["py"]=floorY
                                tablero.delete(_val);
                                _posicionar[str(_val)] = None
                                _posicionar[_tkey] = None
                                if turno=="j1":
                                    turno="j2"
                                else:
                                    turno="j1"
                                turnoj()
                            else:
                                # Cannot jump over a friendly piece: undo.
                                _deltaX = _lpx - _px + _datos_act["relativeOffsetX"]
                                _deltaY = _lpy - _py + _datos_act["relativeOffsetY"]
                                tablero.move(_item,_deltaX,_deltaY)
                        else:
                            _deltaX = _lpx - _px + _datos_act["relativeOffsetX"]
                            _deltaY = _lpy - _py + _datos_act["relativeOffsetY"]
                            tablero.move(_item,_deltaX,_deltaY)
                    else:
                        # Destination occupied: undo.
                        _deltaX = _lpx - _px + _datos_act["relativeOffsetX"]
                        _deltaY = _lpy - _py + _datos_act["relativeOffsetY"]
                        tablero.move(_item,_deltaX,_deltaY)
            else:
                # Single-square diagonal step.
                _val = _posicionar.get(_key,None)
                if _val is not None:
                    # Destination occupied: undo.
                    _deltaX = _lpx - _px + _datos_act["relativeOffsetX"]
                    _deltaY = _lpy - _py + _datos_act["relativeOffsetY"]
                    tablero.move(_item,_deltaX,_deltaY)
                else:
                    # NOTE(review): a legal plain move does NOT toggle the
                    # turn here while invalid moves above do -- verify.
                    _deltaX = floorX -_px + _datos_act["relativeOffsetX"]
                    _deltaY = floorY - _py + _datos_act["relativeOffsetY"]
                    tablero.move(_item,_deltaX,_deltaY)
                    _posicionar[_key] = _item
                    _last_item_key = str(_posicionar[_item_key]["px"])+"-"+str(_posicionar[_item_key]["py"])
                    _posicionar[_last_item_key] = None
                    _posicionar[_item_key]["px"]=floorX
                    _posicionar[_item_key]["py"]=floorY
def buttonMotion(event):
    """While dragging: move the selected piece along with the cursor."""
    global _datos_act
    global tablero
    _item = _datos_act["item"]
    _px = _datos_act["px"]
    _py = _datos_act["py"]
    _deltaX = event.x - _px
    _deltaY = event.y - _py
    # Track the cursor so the next motion event moves relative to here.
    _datos_act["px"] = event.x
    _datos_act["py"] = event.y
    if _item is not None:
        _tags = tablero.gettags(_item)
        if "ficha" in _tags:
            # Keep the dragged piece rendered above the board squares.
            tablero.tag_raise(_item)
            tablero.move(_item,_deltaX,_deltaY)
# Wire the drag-and-drop handlers to every piece and start the UI loop.
tablero.tag_bind("ficha","<Button-1>", buttonClick)
tablero.tag_bind("ficha","<ButtonPress-1>", buttonPress)
tablero.tag_bind("ficha","<ButtonRelease-1>", buttonRelease)
tablero.tag_bind("ficha","<B1-Motion>", buttonMotion)
damas.mainloop()
|
from main.page.desktop_v3.myshop.pe_myshop_base import *
from utils.function.general import *
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
import time
class MyshopEtalasePage(MyshopSettingsBasePage):
    """Page object for the shop "etalase" (showcase) settings page.

    Covers adding, renaming, reordering (drag & drop) and deleting etalase
    entries.  Cleanups in this revision: removed a leftover debug print and
    dead commented-out code, narrowed a bare ``except:``, and the limit
    message is now read from the page body only once.
    """
    # page path and element locators
    _page = "myshop-etalase.pl"
    _btn_add_loc = (By.CSS_SELECTOR, 'a#btn-add')
    _ename_loc = (By.CSS_SELECTOR, "div.control-group div.controls input#e-name")
    _submit_loc = (By.CSS_SELECTOR, "button.btn-action")
    _link_edit_loc = (By.CSS_SELECTOR, "a.edit-etalase")
    _link_delete_loc = (By.CSS_SELECTOR, "a.delete-etalase")
    _src_drag_loc = (By.CSS_SELECTOR, "i.icon-move")
    _dst_drag_loc = (By.CSS_SELECTOR, "li.li-711848")
    _submit_del_loc = (By.XPATH, "//button[@name='submit']")
    _link_edit_latest_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div[2]/div/ol/li[last()]/div/div[3]/small/a[1]')
    # test case input: empty, ascii, Arabic, Chinese, Khmer and numeric names
    list_in = ['', 'a', 'ما تم تؤكل', '什么被吃掉', 'វបានគេបរិភោគ', '131231']

    def open(self, site=""):
        """Navigate to the etalase settings page on *site*."""
        self._open(site, self._page)

    def drag_etalase(self):
        """Drag the first etalase row onto the hard-coded destination row."""
        try:
            print("Drag and Drop")
            time.sleep(2)
            src = self.driver.find_element(*self._src_drag_loc)
            dst = self.driver.find_element(*self._dst_drag_loc)
            ActionChains(self.driver).drag_and_drop(src, dst).perform()
        except Exception as inst:
            # Best effort: a failed reorder is reported but not fatal.
            print(inst)

    def click_add_etalase(self):
        """Click "Add Etalase".

        Returns:
            0 if the 150-etalase limit message is shown, 1 otherwise.
        """
        self.check_visible_element(*self._btn_add_loc)
        target_element = self.find_element(*self._btn_add_loc)
        self.click_on_javascript(target_element)
        time.sleep(2)
        try:
            # Read the body text once (the original queried it twice).
            body_text = self.driver.find_element_by_tag_name('body').text
            if "Anda hanya bisa menambah sampai 150 etalase." in body_text:
                print ("Etalase already reached maximum.")
                return 0
            return 1
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            print("Error before defining element of : Etalase")
            return 1

    def input_etalase_name(self, input1=""):
        """Type *input1* as the etalase name, submit, and assert no error."""
        self.check_visible_element(*self._ename_loc)
        self.find_element(*self._ename_loc).clear()
        self.find_element(*self._ename_loc).send_keys(input1)
        btn_submit = self.find_element(*self._submit_loc)
        self._click(btn_submit)
        time.sleep(2)
        assert "Maaf, Permohonan Anda tidak dapat diproses saat ini. Mohon dicoba kembali." not in self.driver.find_element_by_tag_name('body').text
        assert "Sorry, your request failed to be processed. Please try again." not in self.driver.find_element_by_tag_name('body').text
        print ("Etalase has been added successfully!")

    def click_edit_etalase(self):
        """Open the edit form of the most recently added etalase."""
        self.check_visible_element(*self._link_edit_latest_loc)
        self.driver.find_element(*self._link_edit_latest_loc).click()

    def delete_etalase(self, N=""):
        """Delete the topmost etalase entry (*N* is accepted but unused)."""
        self.check_visible_element(*self._link_delete_loc)
        time.sleep(3)
        self.find_element(*self._link_delete_loc).click()
        time.sleep(3)
        self.find_element(*self._submit_del_loc).click()
        time.sleep(3)
        print ("Etalase[Top] has been deleted successfully!")

    def __str__(self):
        return "Page " + self.driver.title
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.engine.internals.native_engine import AddressInput
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
Dependencies,
FieldSet,
MultipleSourcesField,
SingleSourceField,
StringField,
StringSequenceField,
Target,
TargetFilesGenerator,
generate_multiple_sources_field_help_message,
)
from pants.jvm.target_types import (
JunitTestExtraEnvVarsField,
JunitTestSourceField,
JunitTestTimeoutField,
JvmJdkField,
JvmMainClassNameField,
JvmProvidesTypesField,
JvmResolveField,
JvmRunnableSourceFieldSet,
)
from pants.util.strutil import help_text
class KotlinSourceField(SingleSourceField):
    """Path to a single `.kt` source file."""
    expected_file_extensions = (".kt",)
class KotlinGeneratorSourcesField(MultipleSourcesField):
    """Globs selecting multiple `.kt` source files for generator targets."""
    expected_file_extensions = (".kt",)
class KotlincConsumedPluginIdsField(StringSequenceField):
    """Optional `kotlinc_plugins` field listing required compiler-plugin IDs."""
    help = help_text(
        """
        The IDs of Kotlin compiler plugins that this source file requires.
        The plugin must be defined by a corresponding `kotlinc_plugin` AND `jvm_artifact` target,
        and must be present in this target's resolve's lockfile.
        If not specified, this will default to the plugins specified in
        `[kotlinc].plugins_for_resolve` for this target's resolve.
        """
    )
    alias = "kotlinc_plugins"
    required = False
@dataclass(frozen=True)
class KotlinFieldSet(JvmRunnableSourceFieldSet):
    """Field set matched by rules that operate on a single Kotlin source."""
    required_fields = (KotlinSourceField,)
    sources: KotlinSourceField
@dataclass(frozen=True)
class KotlinGeneratorFieldSet(FieldSet):
    """Field set matched by rules that operate on `kotlin_sources` globs."""
    required_fields = (KotlinGeneratorSourcesField,)
    sources: KotlinGeneratorSourcesField
class KotlinDependenciesField(Dependencies):
    """Standard dependencies field, subclassed so Kotlin rules can target it."""
    pass
# -----------------------------------------------------------------------------------------------
# `kotlin_source` and `kotlin_sources` targets
# -----------------------------------------------------------------------------------------------
class KotlinSourceTarget(Target):
    """`kotlin_source`: one `.kt` file of application or library code."""
    alias = "kotlin_source"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        KotlinDependenciesField,
        KotlinSourceField,
        KotlincConsumedPluginIdsField,
        JvmResolveField,
        JvmProvidesTypesField,
        JvmJdkField,
        JvmMainClassNameField,
    )
    help = "A single Kotlin source file containing application or library code."
class KotlinSourcesGeneratorSourcesField(KotlinGeneratorSourcesField):
    """`sources` field for `kotlin_sources`; defaults to every `.kt` file."""
    default = ("*.kt",)
    help = generate_multiple_sources_field_help_message(
        "Example: `sources=['Example.kt', 'New*.kt', '!OldIgnore.kt']`"
    )
class KotlinSourcesGeneratorTarget(TargetFilesGenerator):
    """`kotlin_sources`: generates one `kotlin_source` target per file."""
    alias = "kotlin_sources"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        KotlinSourcesGeneratorSourcesField,
    )
    generated_target_cls = KotlinSourceTarget
    copied_fields = COMMON_TARGET_FIELDS
    # Fields moved onto each generated `kotlin_source` target.
    moved_fields = (
        KotlinDependenciesField,
        KotlincConsumedPluginIdsField,
        JvmResolveField,
        JvmJdkField,
        JvmProvidesTypesField,
        JvmMainClassNameField,
    )
    help = "Generate a `kotlin_source` target for each file in the `sources` field."
# -----------------------------------------------------------------------------------------------
# `kotlin_junit_tests`
# -----------------------------------------------------------------------------------------------
class KotlinJunitTestSourceField(KotlinSourceField, JunitTestSourceField):
    """A single `.kt` file that is also recognised as a JUnit test source."""
    pass
class KotlinJunitTestDependenciesField(KotlinDependenciesField):
    """Dependencies field specific to Kotlin JUnit test targets."""
    pass
class KotlinJunitTestTarget(Target):
    """`kotlin_junit_test`: a single Kotlin test executed with JUnit."""
    alias = "kotlin_junit_test"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        KotlinJunitTestDependenciesField,
        KotlinJunitTestSourceField,
        KotlincConsumedPluginIdsField,
        JunitTestTimeoutField,
        JunitTestExtraEnvVarsField,
        JvmResolveField,
        JvmJdkField,
        JvmProvidesTypesField,
    )
    help = "A single Kotlin test, run with JUnit."
class KotlinJunitTestsGeneratorSourcesField(KotlinGeneratorSourcesField):
    """`sources` field for `kotlin_junit_tests`; defaults to `*Test.kt`."""
    default = ("*Test.kt",)
    help = generate_multiple_sources_field_help_message(
        "Example: `sources=['*Test.kt', '!TestIgnore.kt']`"
    )
class KotlinJunitTestsGeneratorTarget(TargetFilesGenerator):
    """`kotlin_junit_tests`: generates one `kotlin_junit_test` per file."""
    alias = "kotlin_junit_tests"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        KotlinJunitTestsGeneratorSourcesField,
    )
    generated_target_cls = KotlinJunitTestTarget
    copied_fields = COMMON_TARGET_FIELDS
    # Fields moved onto each generated `kotlin_junit_test` target.
    moved_fields = (
        KotlinJunitTestDependenciesField,
        KotlincConsumedPluginIdsField,
        JunitTestTimeoutField,
        JunitTestExtraEnvVarsField,
        JvmResolveField,
        JvmJdkField,
        JvmProvidesTypesField,
    )
    help = "Generate a `kotlin_junit_test` target for each file in the `sources` field."
# -----------------------------------------------------------------------------------------------
# `kotlinc_plugin` target type
# -----------------------------------------------------------------------------------------------
class KotlincPluginArtifactField(StringField, AsyncFieldMixin):
    """`artifact`: address of the `jvm_artifact` providing a kotlinc plugin."""
    alias = "artifact"
    required = True
    value: str
    help = "The address of a `jvm_artifact` that defines a plugin for `kotlinc`."

    def to_address_input(self) -> AddressInput:
        """Parse the field value into an AddressInput relative to this target."""
        return AddressInput.parse(
            self.value,
            relative_to=self.address.spec_path,
            description_of_origin=(
                f"the `{self.alias}` field in the `{KotlincPluginTarget.alias}` target {self.address}"
            ),
        )
class KotlincPluginIdField(StringField):
    """Optional `plugin_id`; defaults to the target name when unset."""
    alias = "plugin_id"
    help = help_text(
        """
        The ID for `kotlinc` to use when setting options for the plugin.
        If not set, the plugin ID defaults to the target name.
        """
    )
class KotlincPluginArgsField(StringSequenceField):
    """Optional `plugin_args` passed through to the compiler plugin."""
    alias = "plugin_args"
    help = help_text(
        """
        Optional list of argument to pass to the plugin.
        """
    )
class KotlincPluginTarget(Target):
    """`kotlinc_plugin`: declares a kotlinc compiler plugin by artifact + ID."""
    alias = "kotlinc_plugin"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        KotlincPluginArtifactField,
        KotlincPluginIdField,
        KotlincPluginArgsField,
    )
    help = help_text(
        """
        A plugin for `kotlinc`.
        To enable a `kotlinc` plugin, define a target with this target type, and set the `artifact` field to the
        address of a `jvm_artifact` target that provides the plugin. Set the `plugin_id` field to the ID of the
        plugin if that name cannot be inferred from the `name` of this target.
        The standard `kotlinc` plugins are available via the following artifact coordinates and IDs:
        * All-open: `org.jetbrains.kotlin:kotlin-allopen:VERSION` (ID: `all-open`)
        * No-arg: `org.jetbrains.kotlin:kotlin-noarg:VERSION` (ID: `no-arg`)
        * SAM with receiver: `org.jetbrains.kotlin:kotlin-sam-with-receiver:VERSION` (ID: `sam-with-receiver`)
        * kapt (annotation processor): `org.jetbrains.kotlin:org.jetbrains.kotlin:kotlin-annotation-processing-embeddable:VERSION` (ID: `kapt3`)
        * Serialization: `org.jetbrains.kotlin:kotlin-serialization:VERSION` (ID: `serialization`)
        """
    )
def rules():
    """Rules contributed by this module (run support for Kotlin sources)."""
    return [
        *KotlinFieldSet.jvm_rules(),
    ]
|
from graphics import *
import math
import time
win = GraphWin("MY PIE CHART USING BRESENHAM ALGORITHM", 900, 900)
def main(xc=450,yc=450,r=50):
    """Draw a pie chart: a midpoint/Bresenham circle centred at (xc, yc) with
    radius r, plus one green radius line per cumulative percentage.

    Reads from stdin: an integer count, then one line of space-separated
    integer percentages -- presumably summing to 100 (TODO confirm).
    """
    x=0
    y=r
    d=3-2*r  # midpoint-circle decision variable
    x1=xc
    y1=yc
    sumt = 0
    # NOTE(review): n is read but never used; the split() line consumes all
    # the percentages regardless.
    n = int(input())
    p1=list(map(int, input().split()))
    th = []
    # Cumulative sector angles, first in degrees then converted to radians.
    for i in range(len(p1)):
        sumt += ((p1[i]/100)*360)
        theta = sumt
        th.append(theta)
    th = [x*(math.pi/180) for x in th]
    print(th)
    # End point of each sector boundary on the circle rim.
    x2 = [int((r*math.cos(x))+xc) for x in th]
    y2 = [int(yc+(r*math.sin(x))) for x in th]
    coor = []
    for _ in range(len(x2)):
        coor.append((x2[_],y2[_]))
    print(coor)
    # NOTE(review): x3/y3 are computed but never used.
    x3=int(r*math.cos(0)+xc)
    y3=int(yc-r*math.sin(0))
    # Plot one octant and mirror it eight ways via draw_circle().
    while(y>=x):
        draw_circle(xc,yc,x,y)
        x=x+1
        if(d>0):
            y=y-1
            d = d + 4 * (x - y) + 10
        else:
            d = d + 4 * x + 6
        draw_circle(xc,yc,x,y)
    # Draw each sector boundary from the centre to the rim.
    for (x2,y2) in coor:
        aLine = Line(Point(x1,y1), Point(x2,y2))
        aLine.setFill("green")
        aLine.draw(win)
    win.getMouse()
    win.close()
def put_pixel(x,y,color="red"):
    """Plot one pixel on the shared window, with a small delay for animation."""
    global win
    p = Point(x,y)
    p.setFill(color)
    p.draw(win)
    time.sleep(.002)
def draw_circle(xc,yc,x,y):
    """Plot the eight symmetric reflections of (x, y) about centre (xc, yc)."""
    put_pixel(xc+x,yc+y)
    put_pixel(xc-x,yc+y)
    put_pixel(xc+x,yc-y)
    put_pixel(xc-x,yc-y)
    put_pixel(xc+y,yc+x)
    put_pixel(xc-y,yc+x)
    put_pixel(xc+y,yc-x)
    put_pixel(xc-y,yc-x)
def line(x1,y1,x2,y2):
    """Bresenham-style line from (x1, y1) to (x2, y2).

    NOTE(review): only correct for x2 >= x1 with slope in [0, 1], and this
    helper is never called in this script (graphics.Line is used instead).
    """
    dx=abs(x2-x1)
    dy=abs(y2-y1)
    xm=x1
    ym=y1
    put_pixel(xm,ym,"green")
    e=dy-(dx/2)
    while(xm<=x2):
        xm=xm+1
        if(e<0):
            e=e+dy
        else:
            e=e+(dy-dx)
            ym=ym+1
        put_pixel(xm,ym,"green")
main()
|
import re
import collections
import math
# Files that are never scanned (CI config, lockfiles, manifests, web assets,
# public keys).
RE_WHITELIST_FILENAME = re.compile("\.devsecops-ci$"
"|.*require.*\.txt$"
"|.*\.pbxproj$"
"|.*\.xcworkspace\/"
"|.*package\.json$"
"|.*package-lock\.json$"
"|.*yarn\.lock$"
"|.*\.gpg$"
"|.*\.pub$"
"|.*\.htm[l]$"
"|.*\.css$",
re.IGNORECASE)
# Files that are always flagged (certificates and private keys).
RE_BLACKLIST_FILENAME = re.compile(".*\.cer[t]$"
"|.*\.key$"
"|.*\.pem$",
re.IGNORECASE)
# Lines assigning long literal values to API_KEY/APIKEY/SECRET variables, or
# hard-coded Sentry DSN URLs.
RE_BLACKLIST_STRING = re.compile(".*"
"(API[_]?KEY"
"|SECRET"
")\s?=\s?[\'\"]?[0-9a-zA-Z\~\!\@\#\%\^\&\*\(\)\-\+\`\[\]\{\}\<\>\?\/\=]{12,}"
"|SENTRY_DSN\s?=\s?[\'\"]?http",
re.IGNORECASE)
# Characters that separate candidate tokens for the entropy check.
# NOTE(review): the `"\\|"` here reaches the regex engine as an escaped pipe,
# so one alternative is the two-character literal "|{" rather than a lone
# backslash and "{" -- verify the intended delimiter set.
RE_WORD_DELIMITER = re.compile("\s|\\|{|}|`|=|\(|\)|\[|\]|\/|<|>|\:|\@|\.")
# add known false posivive words here...
FALSE_POSITIVES = []
class SecretChecker():
    """Static helpers for flagging potential committed secrets."""

    @staticmethod
    def whitelist_filename(filename):
        """True when *filename* matches a pattern that is never scanned."""
        return RE_WHITELIST_FILENAME.match(filename) is not None

    @staticmethod
    def blacklisted_filename(filename):
        """True when *filename* looks like a certificate or private key."""
        return RE_BLACKLIST_FILENAME.match(filename) is not None

    @staticmethod
    def blacklisted_keyword(text, whitelist):
        """True when *text* matches the secret pattern and no whitelist regex."""
        if RE_BLACKLIST_STRING.match(text) is None:
            return False
        for pattern in whitelist:
            if re.compile(pattern, re.IGNORECASE).match(text):
                return False
        return True

    @staticmethod
    def _shannon_entropy(s):
        """Shannon entropy (bits/char) of the character distribution of *s*."""
        length = len(s)
        return -sum(
            (count / length) * math.log(count / length, 2)
            for count in collections.Counter(s).values()
        )

    @staticmethod
    def check_entropy(text, whitelist, minlen=20, entropy=4.5):
        """Return the first token in *text* whose entropy is suspiciously
        high (>= *entropy* bits/char), or None if nothing qualifies.

        Tokens shorter than *minlen*, known false positives, and tokens
        matching any *whitelist* regex are skipped.
        """
        for token in RE_WORD_DELIMITER.split(text):
            if not token:
                continue
            # Drop surrounding quotes so only the payload is measured.
            if token[0] == '\'':
                token = token.strip('\'')
            elif token[0] == '\"':
                token = token.strip('\"')
            if len(token) < minlen:
                continue
            if token in FALSE_POSITIVES:
                continue
            if any(re.compile(p, re.IGNORECASE).match(token) for p in whitelist):
                continue
            if SecretChecker._shannon_entropy(token) >= entropy:
                return token
        return None
|
from player import *
from board import *
class Human(Player):
    """A human-controlled player, delegating identity to a non-AI Player."""

    def __init__(self):
        # Composition: wrap a Player flagged as not-computer.
        self.player = Player(False)

    def __eq__(self,other):
        return self.player == other.player

    def makeMove(self, board, col):
        """Drop this player's piece into the lowest empty cell of *col*."""
        # Scan rows bottom-up; the first unoccupied slot receives the piece.
        for row in reversed(board.values):
            occupied, _ = row[col]
            if not occupied:
                row[col] = (True, False)
                break
# Ad-hoc smoke test: drop a few pieces and print whether the game is over.
board = Board()
hum = Human()
hum.makeMove(board,2)
hum.makeMove(board,3)
print(board.gameOver())
hum.makeMove(board,3)
hum.makeMove(board,3)
hum.makeMove(board,3)
print(board.gameOver())
hum.makeMove(board,5)
|
import sys
import os
# Redirect stdin to a local sample-input file for IDE-based testing.
# NOTE(review): this absolute Windows path only exists on the author's
# machine; the script crashes anywhere else.
f = open("C:/Users/user/Documents/atCoderProblem/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
# Input format: "A op B" where op is "+" or "-".
a,op,b = input().split()
a,b = int(a),int(b)
if op == "+":
    print(a+b)
else:
    # Anything other than "+" is treated as subtraction.
    print(a-b)
|
import cv2
import argparse
from matplotlib import pyplot as plt
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image',required=True,help='Path to the image')
args=vars(ap.parse_args())
image=cv2.imread(args['image'])
image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
cv2.imshow('Original',image)
hist=cv2.calcHist([image],[0],None,[256],[0,256])
plt.figure()
plt.title('Grayscale Histogram')
plt.xlabel('Bins')
plt.ylabel('# of pixels')
plt.plot(hist) #gri tonlamali histogramlari siler
plt.xlim([0,256])
plt.show()
cv2.waitKey()
|
from django.contrib import admin
from . models import User, Lead, Agent, UserProfile, Category
# Expose the CRM models in the Django admin with default ModelAdmin options.
admin.site.register(User)
admin.site.register(UserProfile)
admin.site.register(Agent)
admin.site.register(Lead)
admin.site.register(Category)
|
from . import *
class Inventory():
    """A fixed-capacity (10 slots) item container, pre-stocked with two beers."""

    def __init__(self):
        self.capacity = 10
        self.contents = ["Heineken ", "Carlsberg"]

    def remove_item(self, index):
        """Remove the item at 1-based position *index* and report it."""
        removed = self.contents.pop(index - 1)
        return "Removed " + removed + " Succesfully!"

    def add_item(self, item):
        """Append *item* unless the inventory is already at capacity."""
        if len(self.contents) >= self.capacity:
            return "Your inventory is full"
        self.contents.append(item)
        return item + " succefully added to inventory"

    def view(self):
        """Return the live contents list (not a copy)."""
        return self.contents
"""
Test cases for ViewTestCase which implements view comparison.
"""
from utils import ViewTestCase
from dataviews import SheetView, SheetStack
from dataviews.boundingregion import BoundingBox
from dataviews.ndmapping import Dimension
import numpy as np
class SheetViewTestCase(ViewTestCase):
    """Shared fixtures: three 2x2 arrays wrapped as SheetViews with default
    and non-default bounding boxes."""
    def setUp(self):
        self.arr1 = np.array([[1,2], [3,4]])
        self.arr2 = np.array([[10,2], [3,4]])
        self.arr3 = np.array([[10,2], [3,40]])
        # Varying arrays, default bounds
        self.sv1 = SheetView(self.arr1, BoundingBox())
        self.sv2 = SheetView(self.arr2, BoundingBox())
        self.sv3 = SheetView(self.arr3, BoundingBox())
        # Varying arrays, different bounds
        self.sv4 = SheetView(self.arr1, BoundingBox(radius=0.3))
        self.sv5 = SheetView(self.arr2, BoundingBox(radius=0.3))
class SheetOverlayTestCase(SheetViewTestCase):
    """Adds overlay fixtures built from the SheetView fixtures."""
    def setUp(self):
        super(SheetOverlayTestCase, self).setUp()
        # Two overlays of depth two with different layers
        self.overlay1_depth2 = (self.sv1 * self.sv2)
        self.overlay2_depth2 = (self.sv1 * self.sv3)
        # Overlay of depth 2 with different bounds
        self.overlay3_depth2 = (self.sv4 * self.sv5)
        # Overlay of depth 3
        self.overlay4_depth3 = (self.sv1 * self.sv2 * self.sv3)
class StackTestCase(SheetOverlayTestCase):
    """Adds 1D and 2D SheetStack fixtures covering key, value, bounds,
    dimension-label and dimensionality variations."""
    def setUp(self):
        super(StackTestCase, self).setUp()
        # Example 1D stack
        self.stack1_1D = SheetStack(dimensions=['int'])
        self.stack1_1D[0] = self.sv1
        self.stack1_1D[1] = self.sv2
        # Changed keys...
        self.stack2_1D = SheetStack(dimensions=['int'])
        self.stack2_1D[1] = self.sv1
        self.stack2_1D[2] = self.sv2
        # Changed number of keys...
        self.stack3_1D = SheetStack(dimensions=['int'])
        self.stack3_1D[1] = self.sv1
        self.stack3_1D[2] = self.sv2
        self.stack3_1D[3] = self.sv3
        # Changed values...
        self.stack4_1D = SheetStack(dimensions=['int'])
        self.stack4_1D[0] = self.sv1
        self.stack4_1D[1] = self.sv3
        # Changed bounds...
        self.stack5_1D = SheetStack(dimensions=['int'])
        self.stack5_1D[0] = self.sv4
        self.stack5_1D[1] = self.sv5
        # Example dimension label
        self.stack6_1D = SheetStack(dimensions=['int_v2'])
        self.stack6_1D[0] = self.sv1
        self.stack6_1D[1] = self.sv2
        # A SheetStack of Overlays
        self.stack7_1D = SheetStack(dimensions=['int'])
        self.stack7_1D[0] = self.overlay1_depth2
        self.stack7_1D[1] = self.overlay2_depth2
        # A different SheetStack of Overlays
        self.stack8_1D = SheetStack(dimensions=['int'])
        self.stack8_1D[0] = self.overlay2_depth2
        self.stack8_1D[1] = self.overlay1_depth2
        # Example 2D stack
        self.stack1_2D = SheetStack(dimensions=['int', Dimension('float')])
        self.stack1_2D[0, 0.5] = self.sv1
        self.stack1_2D[1, 1.0] = self.sv2
        # Changed 2D keys...
        self.stack2_2D = SheetStack(dimensions=['int', Dimension('float')])
        self.stack2_2D[0, 1.0] = self.sv1
        self.stack2_2D[1, 1.5] = self.sv2
class SheetComparisonTest(SheetViewTestCase):
    """
    This tests the ViewTestCase class which is an important component
    of other tests.
    """
    def test_equal(self):
        self.assertEqual(self.sv1, self.sv1)
    def test_unequal_arrays(self):
        # NOTE(review): the fallback `raise AssertionError(...)` is itself
        # caught by the except clause below, so a missed mismatch would only
        # fail on the message-prefix assert.  Also, AssertionError.message is
        # Python 2 only; on Python 3 use str(e).
        try:
            self.assertEqual(self.sv1, self.sv2)
            raise AssertionError("Array mismatch not detected")
        except AssertionError as e:
            assert e.message.startswith('\nArrays are not almost equal to 6 decimals')
    def test_bounds_mismatch(self):
        try:
            self.assertEqual(self.sv1, self.sv4)
        except AssertionError as e:
            assert e.message.startswith('BoundingBoxes are mismatched.')
class SheetOverlayComparisonTest(SheetOverlayTestCase):
    """Checks that overlay depth, element and bounds mismatches are reported
    with the expected messages."""
    def test_depth_mismatch(self):
        try:
            self.assertEqual(self.overlay1_depth2, self.overlay4_depth3)
        except AssertionError as e:
            assert e.message.startswith("Overlays have different lengths.")
    def test_element_mismatch(self):
        try:
            self.assertEqual(self.overlay1_depth2, self.overlay2_depth2)
        except AssertionError as e:
            assert e.message.startswith('\nArrays are not almost equal to 6 decimals')
    def test_bounds_mismatch(self):
        try:
            self.assertEqual(self.overlay1_depth2, self.overlay3_depth2)
        except AssertionError as e:
            assert e.message.startswith('BoundingBoxes are mismatched.')
class StackComparisonTest(StackTestCase):
    """Checks that SheetStack dimension, key, bounds and element mismatches
    are reported with the expected messages."""
    def test_dimension_mismatch(self):
        try:
            self.assertEqual(self.stack1_1D, self.stack1_2D)
            raise AssertionError("Mismatch in dimension number not detected.")
        except AssertionError as e:
            assert e.message.startswith("Stacks have different numbers of dimensions.")
    def test_dimension_label_mismatch(self):
        try:
            self.assertEqual(self.stack1_1D, self.stack6_1D)
            raise AssertionError("Mismatch in dimension labels not detected.")
        except AssertionError as e:
            assert e.message.startswith("Stacks have different dimension labels.")
    def test_key_len_mismatch(self):
        try:
            self.assertEqual(self.stack1_1D, self.stack3_1D)
            raise AssertionError("Mismatch in stack key number not detected.")
        except AssertionError as e:
            assert e.message.startswith("Stacks have different numbers of keys.")
    def test_key_mismatch(self):
        try:
            self.assertEqual(self.stack1_1D, self.stack2_1D)
            raise AssertionError("Mismatch in stack keys not detected.")
        except AssertionError as e:
            assert e.message.startswith("Stacks have different sets of keys.")
    def test_bounds_mismatch(self):
        try:
            self.assertEqual(self.stack1_1D, self.stack5_1D)
            raise AssertionError("Mismatch in element bounding boxes.")
        except AssertionError as e:
            assert e.message.startswith("BoundingBoxes are mismatched.")
    def test_element_mismatch(self):
        try:
            self.assertEqual(self.stack1_1D, self.stack4_1D)
            raise AssertionError("Element mismatch in array data not detected.")
        except AssertionError as e:
            assert e.message.startswith('\nArrays are not almost equal to 6 decimals')
    def test_overlay_mismatch(self):
        try:
            self.assertEqual(self.stack7_1D, self.stack8_1D)
            raise AssertionError("Overlay element mismatch in array data not detected.")
        except AssertionError as e:
            assert e.message.startswith('\nArrays are not almost equal to 6 decimals')
if __name__ == "__main__":
    # BUG FIX: sys.argv was referenced but `sys` was never imported in this
    # module, so running the file directly raised NameError.
    import sys
    import nose
    nose.runmodule(argv=[sys.argv[0], "--logging-level", "ERROR"])
|
#
# Basic protocol definitions for the beng-proxy translation server
# protocol.
#
# Author: Max Kellermann <mk@cm4all.com>
#
TRANSLATE_BEGIN = 1
TRANSLATE_END = 2
TRANSLATE_HOST = 3
TRANSLATE_URI = 4
TRANSLATE_STATUS = 5
TRANSLATE_PATH = 6
TRANSLATE_CONTENT_TYPE = 7
TRANSLATE_HTTP = 8
TRANSLATE_REDIRECT = 9
TRANSLATE_FILTER = 10
TRANSLATE_PROCESS = 11
TRANSLATE_SESSION = 12
TRANSLATE_PARAM = 13
TRANSLATE_USER = 14
TRANSLATE_LANGUAGE = 15
TRANSLATE_REMOTE_HOST = 16
TRANSLATE_PATH_INFO = 17
TRANSLATE_SITE = 18
TRANSLATE_CGI = 19
TRANSLATE_DOCUMENT_ROOT = 20
TRANSLATE_WIDGET_TYPE = 21
TRANSLATE_CONTAINER = 22
TRANSLATE_ADDRESS = 23
TRANSLATE_ADDRESS_STRING = 24
TRANSLATE_JAILCGI = 26
TRANSLATE_INTERPRETER = 27
TRANSLATE_ACTION = 28
TRANSLATE_SCRIPT_NAME = 29
TRANSLATE_AJP = 30
TRANSLATE_DOMAIN = 31
TRANSLATE_STATEFUL = 32
TRANSLATE_FASTCGI = 33
TRANSLATE_VIEW = 34
TRANSLATE_USER_AGENT = 35
TRANSLATE_MAX_AGE = 36
TRANSLATE_VARY = 37
TRANSLATE_QUERY_STRING = 38
TRANSLATE_PIPE = 39
TRANSLATE_BASE = 40
TRANSLATE_DELEGATE = 41
TRANSLATE_INVALIDATE = 42
TRANSLATE_LOCAL_ADDRESS = 43
TRANSLATE_LOCAL_ADDRESS_STRING = 44
TRANSLATE_APPEND = 45
TRANSLATE_DISCARD_SESSION = 46
TRANSLATE_SCHEME = 47
TRANSLATE_REQUEST_HEADER_FORWARD = 48
TRANSLATE_RESPONSE_HEADER_FORWARD = 49
TRANSLATE_DEFLATED = 50
TRANSLATE_GZIPPED = 51
TRANSLATE_PAIR = 52
TRANSLATE_UNTRUSTED = 53
TRANSLATE_BOUNCE = 54
TRANSLATE_ARGS = 55
TRANSLATE_WWW_AUTHENTICATE = 56
TRANSLATE_AUTHENTICATION_INFO = 57
TRANSLATE_AUTHORIZATION = 58
TRANSLATE_HEADER = 59
TRANSLATE_UNTRUSTED_PREFIX = 60
TRANSLATE_SECURE_COOKIE = 61
TRANSLATE_FILTER_4XX = 62
TRANSLATE_ERROR_DOCUMENT = 63
TRANSLATE_CHECK = 64
TRANSLATE_PREVIOUS = 65
TRANSLATE_WAS = 66
TRANSLATE_HOME = 67
TRANSLATE_REALM = 68
TRANSLATE_UNTRUSTED_SITE_SUFFIX = 69
TRANSLATE_TRANSPARENT = 70
TRANSLATE_STICKY = 71
TRANSLATE_DUMP_HEADERS = 72
TRANSLATE_COOKIE_HOST = 73
TRANSLATE_PROCESS_CSS = 74
TRANSLATE_PREFIX_CSS_CLASS = 75
TRANSLATE_FOCUS_WIDGET = 76
TRANSLATE_ANCHOR_ABSOLUTE = 77
TRANSLATE_PREFIX_XML_ID = 78
TRANSLATE_REGEX = 79
TRANSLATE_INVERSE_REGEX = 80
TRANSLATE_PROCESS_TEXT = 81
TRANSLATE_WIDGET_INFO = 82
TRANSLATE_EXPAND_PATH_INFO = 83
TRANSLATE_EXPAND_PATH = 84
TRANSLATE_COOKIE_DOMAIN = 85
TRANSLATE_LOCAL_URI = 86
TRANSLATE_AUTO_BASE = 87
TRANSLATE_UA_CLASS = 88
TRANSLATE_PROCESS_STYLE = 89
TRANSLATE_DIRECT_ADDRESSING = 90
TRANSLATE_SELF_CONTAINER = 91
TRANSLATE_GROUP_CONTAINER = 92
TRANSLATE_WIDGET_GROUP = 93
TRANSLATE_VALIDATE_MTIME = 94
TRANSLATE_NFS_SERVER = 95
TRANSLATE_NFS_EXPORT = 96
TRANSLATE_LHTTP_PATH = 97
TRANSLATE_LHTTP_URI = 98
TRANSLATE_EXPAND_LHTTP_URI = 99
TRANSLATE_LHTTP_HOST = 100
TRANSLATE_CONCURRENCY = 101
TRANSLATE_WANT_FULL_URI = 102
TRANSLATE_USER_NAMESPACE = 103
TRANSLATE_NETWORK_NAMESPACE = 104
TRANSLATE_EXPAND_APPEND = 105
TRANSLATE_EXPAND_PAIR = 106
TRANSLATE_PID_NAMESPACE = 107
TRANSLATE_PIVOT_ROOT = 108
TRANSLATE_MOUNT_PROC = 109
TRANSLATE_MOUNT_HOME = 110
TRANSLATE_MOUNT_TMP_TMPFS = 111
TRANSLATE_UTS_NAMESPACE = 112
TRANSLATE_BIND_MOUNT = 113
TRANSLATE_RLIMITS = 114
TRANSLATE_WANT = 115
TRANSLATE_UNSAFE_BASE = 116
TRANSLATE_EASY_BASE = 117
TRANSLATE_REGEX_TAIL = 118
TRANSLATE_REGEX_UNESCAPE = 119
TRANSLATE_FILE_NOT_FOUND = 120
TRANSLATE_CONTENT_TYPE_LOOKUP = 121
TRANSLATE_SUFFIX = 122
TRANSLATE_DIRECTORY_INDEX = 123
TRANSLATE_EXPIRES_RELATIVE = 124
TRANSLATE_EXPAND_REDIRECT = 125
TRANSLATE_EXPAND_SCRIPT_NAME = 126
TRANSLATE_TEST_PATH = 127
TRANSLATE_EXPAND_TEST_PATH = 128
TRANSLATE_REDIRECT_QUERY_STRING = 129
TRANSLATE_ENOTDIR = 130
TRANSLATE_STDERR_PATH = 131
TRANSLATE_COOKIE_PATH = 132
TRANSLATE_AUTH = 133
TRANSLATE_SETENV = 134
TRANSLATE_EXPAND_SETENV = 135
TRANSLATE_EXPAND_URI = 136
TRANSLATE_EXPAND_SITE = 137
TRANSLATE_REQUEST_HEADER = 138
TRANSLATE_EXPAND_REQUEST_HEADER = 139
TRANSLATE_AUTO_GZIPPED = 140
TRANSLATE_EXPAND_DOCUMENT_ROOT = 141
TRANSLATE_PROBE_PATH_SUFFIXES = 142
TRANSLATE_PROBE_SUFFIX = 143
TRANSLATE_AUTH_FILE = 144
TRANSLATE_EXPAND_AUTH_FILE = 145
TRANSLATE_APPEND_AUTH = 146
TRANSLATE_EXPAND_APPEND_AUTH = 147
TRANSLATE_LISTENER_TAG = 148
TRANSLATE_EXPAND_COOKIE_HOST = 149
TRANSLATE_EXPAND_BIND_MOUNT = 150
TRANSLATE_NON_BLOCKING = 151
TRANSLATE_READ_FILE = 152
TRANSLATE_EXPAND_READ_FILE = 153
TRANSLATE_EXPAND_HEADER = 154
TRANSLATE_REGEX_ON_HOST_URI = 155
TRANSLATE_SESSION_SITE = 156
TRANSLATE_IPC_NAMESPACE = 157
TRANSLATE_AUTO_DEFLATE = 158
TRANSLATE_EXPAND_HOME = 159
TRANSLATE_EXPAND_STDERR_PATH = 160
TRANSLATE_REGEX_ON_USER_URI = 161
TRANSLATE_AUTO_GZIP = 162
TRANSLATE_INTERNAL_REDIRECT = 163
TRANSLATE_LOGIN = 164
TRANSLATE_UID_GID = 165
TRANSLATE_PASSWORD = 166
TRANSLATE_REFENCE = 167
TRANSLATE_SERVICE = 168
TRANSLATE_INVERSE_REGEX_UNESCAPE = 169
TRANSLATE_BIND_MOUNT_RW = 170
TRANSLATE_EXPAND_BIND_MOUNT_RW = 171
TRANSLATE_UNTRUSTED_RAW_SITE_SUFFIX = 172
TRANSLATE_MOUNT_TMPFS = 173
TRANSLATE_REVEAL_USER = 174
TRANSLATE_REALM_FROM_AUTH_BASE = 175
TRANSLATE_NO_NEW_PRIVS = 176
TRANSLATE_CGROUP = 177
TRANSLATE_CGROUP_SET = 178
TRANSLATE_EXTERNAL_SESSION_MANAGER = 179
TRANSLATE_EXTERNAL_SESSION_KEEPALIVE = 180
TRANSLATE_CRON = 181
TRANSLATE_BIND_MOUNT_EXEC = 182
TRANSLATE_EXPAND_BIND_MOUNT_EXEC = 183
TRANSLATE_STDERR_NULL = 184
TRANSLATE_EXECUTE = 185
TRANSLATE_FORBID_USER_NS = 186
TRANSLATE_POOL = 187
TRANSLATE_MESSAGE = 188
TRANSLATE_CANONICAL_HOST = 189
TRANSLATE_SHELL = 190
TRANSLATE_TOKEN = 191
TRANSLATE_STDERR_PATH_JAILED = 192
TRANSLATE_UMASK = 193
TRANSLATE_CGROUP_NAMESPACE = 194
TRANSLATE_REDIRECT_FULL_URI = 195
TRANSLATE_FORBID_MULTICAST = 196
TRANSLATE_HTTPS_ONLY = 197
TRANSLATE_FORBID_BIND = 198
TRANSLATE_NETWORK_NAMESPACE_NAME = 199
TRANSLATE_MOUNT_ROOT_TMPFS = 200
TRANSLATE_CHILD_TAG = 201
TRANSLATE_CERTIFICATE = 202
TRANSLATE_UNCACHED = 203
TRANSLATE_PID_NAMESPACE_NAME = 204
TRANSLATE_SUBST_YAML_FILE = 205
TRANSLATE_ALT_HOST = 206
TRANSLATE_SUBST_ALT_SYNTAX = 207
TRANSLATE_CACHE_TAG = 208
TRANSLATE_REQUIRE_CSRF_TOKEN = 209
TRANSLATE_SEND_CSRF_TOKEN = 210
TRANSLATE_HTTP2 = 211
TRANSLATE_REQUEST_URI_VERBATIM = 212
TRANSLATE_DEFER = 213
TRANSLATE_STDERR_POND = 214
TRANSLATE_CHAIN = 215
TRANSLATE_BREAK_CHAIN = 216
TRANSLATE_CHAIN_HEADER = 217
TRANSLATE_FILTER_NO_BODY = 218
TRANSLATE_HTTP_AUTH = 219
TRANSLATE_TOKEN_AUTH = 220
TRANSLATE_AUTH_TOKEN = 221
TRANSLATE_MOUNT_EMPTY = 222
TRANSLATE_TINY_IMAGE = 223
TRANSLATE_ATTACH_SESSION = 224
TRANSLATE_DISCARD_REALM_SESSION = 225
TRANSLATE_LIKE_HOST = 226
TRANSLATE_LAYOUT = 227
TRANSLATE_RECOVER_SESSION = 228
TRANSLATE_OPTIONAL = 229
TRANSLATE_AUTO_BROTLI_PATH = 230
TRANSLATE_TRANSPARENT_CHAIN = 231
TRANSLATE_STATS_TAG = 232
TRANSLATE_MOUNT_DEV = 233
TRANSLATE_BIND_MOUNT_FILE = 234
TRANSLATE_EAGER_CACHE = 235
TRANSLATE_AUTO_FLUSH_CACHE = 236
TRANSLATE_PARALLELISM = 237
TRANSLATE_EXPIRES_RELATIVE_WITH_QUERY = 238
TRANSLATE_CGROUP_XATTR = 239
TRANSLATE_CHECK_HEADER = 240
TRANSLATE_PLAN = 241
TRANSLATE_CHDIR = 242
TRANSLATE_SESSION_COOKIE_SAME_SITE = 243
TRANSLATE_NO_PASSWORD = 244
TRANSLATE_REALM_SESSION = 245
TRANSLATE_WRITE_FILE = 246
TRANSLATE_PATH_EXISTS = 247
TRANSLATE_AUTHORIZED_KEYS = 248
TRANSLATE_AUTO_BROTLI = 249
TRANSLATE_DISPOSABLE = 250
TRANSLATE_DISCARD_QUERY_STRING = 251
TRANSLATE_MOUNT_NAMED_TMPFS = 252
TRANSLATE_PROXY = TRANSLATE_HTTP # deprecated
TRANSLATE_LHTTP_EXPAND_URI = TRANSLATE_EXPAND_LHTTP_URI # deprecated
HEADER_FORWARD_NO = 0
HEADER_FORWARD_YES = 1
HEADER_FORWARD_MANGLE = 2
HEADER_FORWARD_BOTH = 3
HEADER_GROUP_ALL = -1
HEADER_GROUP_IDENTITY = 0
HEADER_GROUP_CAPABILITIES = 1
HEADER_GROUP_COOKIE = 2
HEADER_GROUP_OTHER = 3
HEADER_GROUP_FORWARD = 4
HEADER_GROUP_CORS = 5
HEADER_GROUP_SECURE = 6
HEADER_GROUP_TRANSFORMATION = 7
HEADER_GROUP_LINK = 8
HEADER_GROUP_SSL = 9
HEADER_GROUP_AUTH = 10
|
import pickle
import numpy as np
import scipy.stats
from abc import ABCMeta, abstractmethod
class KeypressEventReceiver(object):
    '''Interface for objects that consume keyboard events via a callback.'''
    __metaclass__=ABCMeta
    # Event type codes handed to on_key().
    KEY_DOWN = 0
    KEY_UP = 1
    @abstractmethod
    def on_key(self, key, event_type, time_ms):
        '''Handle a single keyboard event.

        key        -- integer key code
        event_type -- one of (KEY_DOWN, KEY_UP)
        time_ms    -- time, in milliseconds, at which the key was
                      pressed or released
        '''
        pass
class VersionedSerializableClass( object ):
    """Base class for pickle-persisted objects stamped with a class version.

    Instances record CLASS_VERSION at construction time; loading rejects
    data whose recorded version no longer matches the current class.
    """
    __metaclass__=ABCMeta
    FILE_EXTENSION=".pickle"
    CLASS_VERSION= -1

    def __init__(self, *args, **kwargs):
        # Stamp the instance with the version current at creation time.
        self._class_version= self.CLASS_VERSION

    def save_to_file(self, filename):
        """Serialize this instance to filename + FILE_EXTENSION."""
        with open(filename+self.FILE_EXTENSION, 'wb') as f:
            self._serialize_to_file( f )

    @classmethod
    def load_from_file( cls, filename):
        """Deserialize one instance from filename (extension appended if needed).

        Raises TypeError when the file contains an unexpected type or a
        mismatched class version.
        """
        import os
        if not os.path.exists(filename):
            filename+=cls.FILE_EXTENSION
        with open(filename, 'rb') as f:
            instance= cls._deserialize_from_file( f )
        load_error=None
        if not isinstance( instance, cls ):
            load_error= 'Unexpected instance type'
        elif instance._class_version!=cls.CLASS_VERSION:
            load_error= 'Class version mismatch (expected "{}", got "{}")'.format( cls.CLASS_VERSION, instance._class_version)
        if load_error:
            raise TypeError("Failed to load serialized data from {}: {}".format(filename, load_error))
        return instance

    @classmethod
    def load_from_dir( cls, directory ):
        """Load every serialized instance in directory.

        Returns a dict mapping file name (without extension) to instance.
        """
        import os
        d= directory
        filenames= [f for f in os.listdir(d) if f.endswith(cls.FILE_EXTENSION)]
        path_names= [os.path.join(d,f) for f in filenames]
        # BUG FIX: str.rstrip strips a *set of characters*, not a suffix, so
        # e.g. "file.pickle".rstrip(".pickle") gave "f".  Slice the suffix off.
        bare_names= [fn[:-len(cls.FILE_EXTENSION)] for fn in filenames] #without extension
        instances= map( cls.load_from_file, path_names)
        return dict(zip(bare_names, instances))

    def _serialize_to_file( self, f ):
        pickle.dump(self, f)

    @classmethod
    def _deserialize_from_file( cls, f ):
        # Counterpart of _serialize_to_file.  load_from_file called this hook
        # but it was never defined anywhere, so loading always failed.
        return pickle.load(f)
|
# chat/views.py
from django.shortcuts import render
def likeA(request):
    """Render the 'like' page for variant A."""
    template = 'draw/likeA.html'
    return render(request, template)
def feedbackA(request):
    """Render the feedback page for variant A."""
    template = 'draw/feedbackA.html'
    return render(request, template)
def commentA(request):
    """Render the comment page for variant A."""
    template = 'draw/commentA.html'
    return render(request, template)
def likeB(request):
    """Render the 'like' page for variant B."""
    template = 'draw/likeB.html'
    return render(request, template)
def feedbackB(request):
    """Render the feedback page for variant B."""
    template = 'draw/feedbackB.html'
    return render(request, template)
def commentB(request):
    """Render the comment page for variant B."""
    template = 'draw/commentB.html'
    return render(request, template)
def index(request):
    """Render the landing page of the draw app."""
    template = 'draw/index.html'
    return render(request, template)
def large(request):
    """Render the large-canvas page."""
    template = 'draw/large.html'
    return render(request, template)
def room(request, room_name):
    """Render the drawing room identified by room_name."""
    context = {'room_name': room_name}
    return render(request, 'draw/room.html', context)
|
import os
# Absolute path of the directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Base application configuration: server, OAuth, JIRA and policy settings.

    Environment-specific subclasses (ProductionConfig, DevelopmentConfig,
    TestingConfig) override individual fields.
    """
    #-------domain settings---------#
    HTTPS_PORT = 443
    HTTP_PORT = 80
    INTERFACE = '0.0.0.0'
    SSL_CERTFILE = 'ssl/server.crt'
    SSL_KEYFILE = 'ssl/server.key'
    #-------domain settings---------#
    #-----application settings------#
    # NOTE(review): secret key hardcoded in source — move to an environment
    # variable or secrets store before deploying.
    SECRET_KEY = '$uP36S3c63T'
    DEBUG = True
    ACCESS_LOGFILE_NAME = 'logs/access_log'
    ERROR_LOGFILE_NAME = 'logs/error_log'
    UPLOAD_FOLDER = 'uploads'
    DEFAULT_FOLDER = 'static/default'
    #-----application settings------#
    #-------oauth settings---------#
    OAUTH_CLIENT = 'google'
    # NOTE(review): client id/secret placeholders — keep real values out of
    # version control.
    GOOGLE_CLIENT_ID = 'XXXXXXXXXXXXXXXX-XXXXXXXXXXXXXXXXqbtijnssm9nv34ou6.apps.googleusercontent.com'
    GOOGLE_CLIENT_SECRET = 'XXXXXXXXXXXXXXXX'
    REDIRECT_URI = '/oauth2callback'  # one of the Redirect URIs from Google APIs console
    BASE_URL='https://www.google.com/accounts/'
    AUTHORIZE_URL='https://accounts.google.com/o/oauth2/auth'
    REQUEST_TOKEN_URL=None
    REQUEST_TOKEN_PARAMS={'scope': 'https://www.googleapis.com/auth/userinfo.email',
                          'response_type': 'code'}
    ACCESS_TOKEN_URL='https://accounts.google.com/o/oauth2/token'
    ACCESS_TOKEN_METHOD='POST'
    ACCESS_TOKEN_PARAMS={'grant_type': 'authorization_code'}
    #-------oauth settings---------#
    #-------other settings---------#
    ALLOWED_DOMAINS = ['gmail.com']
    SECURITY_EMAIL = 'mohan.gcsm@gmail.com'
    APPSEC_USERS = ['mohan.gcsm@gmail.com']
    # Request categories that require a second (peer) review when enabled.
    PEER_REVIEW_ENABLED = False
    PEER_REVIEW_REQUIRED_FOR = [
        'new_web_app',
        'new_mobile_app',
        'new_rest_api',
        'existing_web_app',
        'existing_mobile_app',
        'existing_rest_api'
    ]
    #-------other settings---------#
    #-external page links settings-#
    RFP_KB_Link = ''
    SEC_KB_Link = ''
    #-external page links settings-#
    #-------JIRA settings----------#
    # Workflow transition ids: index 0 is the no-peer-review workflow,
    # index 1 the peer-review workflow.
    JIRA_SETTINGS = {
        "JIRA_URL" : "<JIRA URL>",
        "JIRA_USER" : "<JIRA USERNAME>",
        "JIRA_PASS" : "<JIRA TOKEN not PASSWORD>",
        "JIRA_PROJECT" : "<JIRA PROJECT NAME>",
        "JIRA_TRANSITIONS" : [
            { # action ids with out peer review
                "TODO_TRANS" : 711, # move to To do from backlog action
                "SEND_FOR_REVIEW_TRANS" : None, # This should always be none
                "APPROVE_TRANS" : 5, #Resolve action
                "CLOSED_TRANS" : 2 # close or reject action,
            },
            { # action ids with peer review
                "TODO_TRANS" : 51, # move to To do from backlog action
                "SEND_FOR_REVIEW_TRANS" : 151, # Send for review action
                "APPROVE_TRANS" : 141, # approve action
                "CLOSED_TRANS" : 131 # close or reject action
            }
        ],
        "JIRA_COMPONENTS" : {
            "SECURITY_REVIEW" : "Security Review",
            "SECURITY_BUG" : "Security Bug"
        }
    }
    #-------allowed domains settings---------#
class ProductionConfig(Config):
    """Configuration for production deployments (debugging disabled)."""
    DEBUG = False
class DevelopmentConfig(Config):
    """Configuration for local development (debugging enabled)."""
    DEBUG = True
class TestingConfig(Config):
    """Configuration for test runs (enables framework testing mode)."""
    TESTING = True
|
class Solution(object):
    def subarraySum(self, nums, k):
        """Count contiguous subarrays of nums whose elements sum to k.

        Prefix-sum technique: a subarray ending at the current position
        sums to k exactly when some earlier prefix equals (running - k).
        O(n) time, O(n) extra space.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        prefix_counts = {0: 1}  # the empty prefix
        running = 0
        total = 0
        for value in nums:
            running += value
            # every earlier prefix equal to running-k closes one subarray
            total += prefix_counts.get(running - k, 0)
            prefix_counts[running] = prefix_counts.get(running, 0) + 1
        return total
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 21:14:45 2020
@author: luzhiwei
"""
import matplotlib.pyplot as plt
import numpy as np
import re
from matplotlib.pyplot import MultipleLocator
from math import sqrt, pow, sin, cos, pi
import sys
import math
def draw_band_structure(eigenval_filename, dos_file, pos_file, kpoint_filename,yaxis_max,yaxis_min):
    """Plot the band structure from VASP output files and save it as Band.jpg.

    eigenval_filename -- EIGENVAL file (eigenvalues per k-point/band)
    dos_file          -- DOSCAR file (used only for the Fermi energy)
    pos_file          -- POSCAR file (lattice -> reciprocal lattice)
    kpoint_filename   -- KPOINTS file (high-symmetry path and labels)
    yaxis_max/yaxis_min -- energy window of the plot, in eV
    """
    yaxis_max=yaxis_max
    yaxis_min=yaxis_min
    engval,kpoints,ispin_type=read_eigenval(eigenval_filename)
    E_fermi = get_fermi_from_doscar(dos_file)
    # shift all eigenvalues so the Fermi level sits at 0
    engval=engval-E_fermi
    latt_rec=read_recip(pos_file)
    latt,latt_vec,atom,S,sys_name=read_poscar(pos_file)
    hsp,hsp_label,node=read_high_sym_point(kpoint_filename)
    # cumulative k-path length -> x coordinate, appended as a 4th column
    temp=[]
    for ii in range(1,kpoints.shape[0]):
        temp.append(np.linalg.norm((kpoints[ii-1]-kpoints[ii])*latt_rec))
    temp.insert(0,0)
    temp=np.cumsum(temp)
    temp=temp.reshape(kpoints.shape[0],1)
    kpoints=np.append(kpoints,temp,axis=1)
    # first/last k-point of each segment = the high-symmetry points
    aa=[]
    for ii in range(1,int(hsp.shape[0]/2)+1):
        aa.append(kpoints[node*(ii-1),:])
        aa.append(kpoints[node*ii-1,:])
    new_array = np.array(aa)
    x_scale=[]
    x_scale=np.array(list(set([tuple(x) for x in new_array])))
    # deduplicate high-symmetry points by coordinate sum, remembering the
    # 1-based position of the first occurrence of each
    coordskeys2=[]
    coordskeys3=[]
    new_array2=list(new_array)
    ii=0
    for aa in new_array:
        bb=np.sum(aa)
        ii=ii+1
        if bb not in coordskeys2:
            coordskeys2.append(bb)
            coordskeys3.append(ii)
    coordskeys5=[x-1 for x in coordskeys3] # 0-based indices of unique points
    coordskeys5=np.array(coordskeys5)
    coordskeys6=[]
    for ii in range(int(len(coordskeys5))):
        coordskeys6.append(tuple(hsp_label[coordskeys5[ii]]))
    coef = np.array(coordskeys6).flatten()
    kk=[]
    for bb in coef:
        kk.append( re.findall(r'\b\w', bb))
    hsp_label2=np.array(kk).flatten() # k-point tick labels
    if ispin_type==1:
        # non spin-polarised: one band set (energies at even engval columns)
        plt.figure(dpi=300)
        energy=np.zeros([engval.shape[0],int(engval.shape[1]/2)])
        for ii in range(energy.shape[0]):
            for jj in range(energy.shape[1]):
                energy[ii][jj]=engval[ii][int(2*jj)]
        # print(coordskeys2,coordskeys5)
        plot_band(kpoints,x_scale,hsp_label2,E_fermi,sys_name,energy,'r',yaxis_max,yaxis_min)
    else:
        # spin-polarised: spin-up and spin-down panels side by side
        plt.figure(dpi=300)
        plt.subplot(121)
        sys_name='spin_up'
        energy_up=np.zeros([engval.shape[0],int(engval.shape[1]/2)])
        for ii in range(energy_up.shape[0]):
            for jj in range(energy_up.shape[1]):
                energy_up[ii][jj]=engval[ii][int(2*jj)][0]
        plot_band(kpoints,x_scale,hsp_label2,E_fermi,sys_name,energy_up,'b',yaxis_max,yaxis_min)
        plt.subplot(122)
        sys_name='spin_down'
        energy_down=np.zeros([engval.shape[0],int(engval.shape[1]/2)])
        for ii in range(energy_down.shape[0]):
            for jj in range(energy_down.shape[1]):
                energy_down[ii][jj]=engval[ii][int(2*jj)][1]
        plot_band(kpoints,x_scale,hsp_label2,E_fermi,sys_name,energy_down,'r',yaxis_max,yaxis_min)
    plt.savefig('Band.jpg')
def get_fermi_from_doscar(dos_file):
    """Return the Fermi energy read from a VASP DOSCAR file.

    The Fermi level is the 4th whitespace-separated field of line 6.

    @param dos_file : path to the DOSCAR file
    @return Fermi energy as a float
    """
    # Context manager closes the handle even on error (the original
    # leaked the file descriptor).
    with open(dos_file) as f:
        for ii in range(6):
            line = f.readline()
    return float(line.split()[3])
def plot_band(kpoints,x_scale,hsp_label2,E_fermi,sys_name,energy,color,yaxis_max,yaxis_min):
    """Draw one band-structure panel on the current matplotlib axes.

    kpoints   -- k-point array with the cumulative path length in column 3
    x_scale   -- high-symmetry points (path length in column 3)
    hsp_label2 -- tick labels for the high-symmetry points
    energy    -- (n_kpoints, n_bands) energies, already Fermi-shifted
    color     -- matplotlib line colour for the bands
    """
    plt.plot(kpoints[:,3],energy,color=color)
    # vertical dashed line at each high-symmetry point
    for ii in range(int(x_scale.shape[0])):
        plt.axvline(x=x_scale[ii,3], color='r', linestyle='--')
    coordskeys7=np.array(sorted(list(x_scale[:,3])))
    plt.xticks(coordskeys7,hsp_label2)
    # horizontal dashed line marking the (shifted) Fermi level at 0
    plt.axhline(0, color='r', linestyle='--')
    x_max=coordskeys7[int(coordskeys7.shape[0]-1)]
    x_min=coordskeys7[0]
    y_max=yaxis_max
    y_min=yaxis_min
    y_major_locator=MultipleLocator(1)
    plt.gca().yaxis.set_major_locator(y_major_locator)
    plt.xlim(x_min,x_max)
    plt.ylim(y_min,y_max)
    plt.title('Bandstructure of '+sys_name,y=0.94,x=0.6)
def read_doscar(dos_file):
    """Parse a VASP DOSCAR file.

    @param dos_file : path to the DOSCAR file
    @return (sum_dos, p_dos) where sum_dos is the total-DOS block as a
        (NEDOS, n_columns) array and p_dos is either the projected DOS as
        a (n_element, NEDOS, n_pdos) array, or 0 when the file has no
        projected-DOS section.
    """
    # --- pass 1: header -> atom count, NEDOS and total-DOS column count
    with open(dos_file) as f:
        line = f.readline()
        s1 = np.array(line.split())
        for ii in range(5):
            line = f.readline()
        s = np.array(line.split())
        NEDOS = int(s[2])
        n_element = int(s1[0])
        line = f.readline()
        line = f.readline()
        line = f.readline()
        split = line.split()
        # 3 columns (energy, dos, integrated dos) -> non spin-polarised
        if len(split) == 3:
            ispin_type = 1
        else:
            ispin_type = 2
        sum_dos = np.zeros([NEDOS, len(split)])
    # --- pass 2: total-DOS block, then probe for a projected-DOS section
    with open(dos_file) as f:
        for ii in range(6):
            line = f.readline()
        for ii in range(NEDOS):
            line = f.readline()
            split = line.split()
            sum_dos[ii][:] = [float(split[jj]) for jj in range(len(split))]
        line = f.readline()
        line = f.readline()
        split = line.split()
        # an empty read here means the file ends right after the total DOS
        if not line:
            have_pdos = False
        else:
            have_pdos = True
        if have_pdos:
            n_pdos = len(split)
        else:
            n_pdos = 0
    # --- pass 3: projected DOS, one (header + NEDOS rows) block per atom.
    # BUG FIX: the original opened the file here and never closed it.
    if n_pdos != 0:
        with open(dos_file) as f:
            for nn in range(NEDOS + 6):
                line = f.readline()
            p_dos = np.zeros([n_element, NEDOS, n_pdos])
            for ii in range(n_element):
                line = f.readline()  # per-atom block header line
                for jj in range(NEDOS):
                    line = f.readline()
                    split = line.split()
                    p_dos[ii][jj][:] = [float(split[kk]) for kk in range(len(split))]
    else:
        p_dos = 0
    return sum_dos, p_dos
def read_eigenval(eigenval_filename):
    """Parse a VASP EIGENVAL file.

    @param eigenval_filename : path to the EIGENVAL file
    @return (engval, kpoints, ispin_type):
        ispin_type -- 1 = non spin-polarised, 2 = spin-polarised
        kpoints    -- (nkpoints, 3) fractional k-point coordinates
        engval     -- eigenvalue array; energies live at even column
                      indices (2*j) and occupations at odd ones (2*j+1);
                      spin-polarised data adds a trailing axis of size 2
                      for up/down.
    """
    f=open(eigenval_filename)
    line=f.readline()
    split=line.split()
    # last field of line 1 is ISPIN
    ispin_type=int(split[-1])
    for ii in range(5):
        line=f.readline()
    split=line.split()
    nbands=int(split[-1])
    nkpoints=int(split[1])
    kpoints=[]
    if ispin_type==1:
        engval=np.zeros([nkpoints,nbands*2])
        # print(engval.size)
        for ii in range(nkpoints):
            # blank line, then the k-point coordinate line
            line=f.readline()
            line=f.readline()
            split=line.split()
            kpoints.append([float(split[j]) for j in range(3)])
            for jj in range(nbands):
                line = f.readline()
                split=line.split()
                # fields: band index, energy, occupation
                engval[ii][2*jj]=[float(split[kk]) for kk in range(3)][1]
                engval[ii][2*jj+1]=[float(split[kk]) for kk in range(3)][-1]
        kpoints=np.array(kpoints)
        # engval=engval-E_fermi
    else:
        # spin-polarised: fields are band, E_up, E_down, occ_up, occ_down
        engval= np.zeros([nkpoints,nbands*2,2])
        for ii in range(nkpoints):
            line=f.readline()
            line=f.readline()
            split=line.split()
            kpoints.append([float(split[j]) for j in range(3)])
            for jj in range(nbands):
                line = f.readline()
                split=line.split()
                engval[ii][2*jj][0]=float(split[1])
                engval[ii][2*jj+1][0]=float(split[3])
                engval[ii][2*jj][1]=float(split[2])
                engval[ii][2*jj+1][1]=float(split[4])
        kpoints=np.array(kpoints)
        # engval=engval-E_fermi
    f.close()
    return engval,kpoints,ispin_type
def read_high_sym_point(kpoint_filename):
    """Parse a line-mode KPOINTS file for the high-symmetry path.

    @param kpoint_filename : path to the KPOINTS file
    @return (hsp, hsp_label, node):
        hsp       -- (n, 3) float array of high-symmetry point coordinates
        hsp_label -- list of label token lists (the text after '!')
        node      -- k-points sampled per path segment (first int on line 2)
    """
    hsp_1 = []
    hsp_label = []
    with open(kpoint_filename) as f:
        line1 = f.readline()
        line2 = f.readline()
        node = int(re.findall(r'\d+', line2)[0])
        # the two mode lines ("Line_mode", "Reciprocal") are skipped by [2:]
        for line in f.readlines()[2:]:
            coords = line[0:line.rfind('!')]
            hsp_1.append(coords.split())
            label = line[line.rfind('!')+1:]
            hsp_label.append(label.split())
    # drop the empty entries produced by blank lines
    while [] in hsp_1 and hsp_label:
        hsp_1.remove([])
        hsp_label.remove([])
    hsp = []
    for ii in range(len(hsp_1)):
        row = hsp_1[ii]
        for jj in range(3):
            # BUG FIX: np.float was removed in NumPy 1.20+; use the builtin.
            row[jj] = float(row[jj])
        hsp.append(row)
    return np.array(hsp), hsp_label, node
def read_poscar(pos_file):
    """Parse a VASP POSCAR file.

    @param pos_file : path to the POSCAR file
    @return (latt, latt_vec, atom, S, sys_name):
        latt     -- lattice scale factor
        latt_vec -- (3, 3) lattice vectors, already multiplied by latt
        atom     -- element symbols followed by their counts
        S        -- (n_atoms, 3) atomic coordinates as written in the file
        sys_name -- the first (title) line, newline included

    Raises IOError/OSError when the file cannot be opened.  (The original
    caught IOError, printed, and then crashed on unbound variables.)
    """
    latt_vec = np.zeros([3, 3])
    try:
        poscar = open(pos_file)
    except IOError:
        print('POSCAR is not exist or file open failed! \n')
        raise
    with poscar:
        sys_name = poscar.readline()
        latt = float(poscar.readline())
        for i in range(3):
            line = poscar.readline().split()
            for j in range(3):
                latt_vec[i, j] = latt * float(line[j])
        atom_ele = poscar.readline().split()
        atom_num = [int(n) for n in poscar.readline().split()]
        atom = atom_ele + atom_num
        atom_sum = (np.array(atom_num)).sum()
        line2 = poscar.readline()  # coordinate-mode line ("Direct"/"Cartesian")
        S = np.zeros([atom_sum, 3])
        for ii in range(atom_sum):
            S1 = poscar.readline().split()
            for jj in range(3):
                # BUG FIX: np.float was removed in NumPy 1.20+; use the builtin.
                S[ii, jj] = float(S1[jj])
    return latt, latt_vec, atom, S, sys_name
def read_recip(pos_file):
    """Return the reciprocal lattice 2*pi*(A^-1)^T of the given POSCAR.

    @param pos_file : path to the POSCAR file
    @return reciprocal lattice as a numpy matrix

    BUG FIX: the original overwrote pos_file with the hardcoded name
    'POSCAR' (silently ignoring its argument) and parsed the file twice.
    """
    latt, latt_vec, atom, S, sys_name = read_poscar(pos_file)
    return 2 * math.pi * np.mat(latt_vec).I.T
def draw_ados(eigenval_filename, dos_file, pos_file, kpoint_filename,yaxis_max,yaxis_min):
    """Recompute the k-path scale used by the DOS plots.

    Repeats the k-path bookkeeping of draw_band_structure and returns only
    (x_scale, sys_name): the high-symmetry points (with cumulative path
    length appended as column 3) and the system title from the POSCAR.
    Despite the name, this function draws nothing itself.
    """
    engval,kpoints,ispin_type=read_eigenval(eigenval_filename)
    E_fermi = get_fermi_from_doscar(dos_file)
    # shift eigenvalues so the Fermi level sits at 0
    engval=engval-E_fermi
    latt_rec=read_recip(pos_file)
    latt,latt_vec,atom,S,sys_name=read_poscar(pos_file)
    hsp,hsp_label,node=read_high_sym_point(kpoint_filename)
    # cumulative k-path length -> extra column on kpoints
    temp=[]
    for ii in range(1,kpoints.shape[0]):
        temp.append(np.linalg.norm((kpoints[ii-1]-kpoints[ii])*latt_rec))
    temp.insert(0,0)
    temp=np.cumsum(temp)
    temp=temp.reshape(kpoints.shape[0],1)
    kpoints=np.append(kpoints,temp,axis=1)
    # first/last k-point of each segment = the high-symmetry points
    aa=[]
    for ii in range(1,int(hsp.shape[0]/2)+1):
        aa.append(kpoints[node*(ii-1),:])
        aa.append(kpoints[node*ii-1,:])
    new_array = np.array(aa)
    x_scale=[]
    x_scale=np.array(list(set([tuple(x) for x in new_array])))
    return x_scale,sys_name
def draw_pdos(eigenval_filename, dos_file, pos_file, kpoint_filename,yaxis_max,yaxis_min):
    """Plot the element-projected DOS from a DOSCAR and save it as Pdos.jpg.

    The projected-DOS column count (npdos) selects the layout:
    10 or 17 columns -> non spin-polarised; 19 or 33 -> spin-polarised
    (up/down columns interleaved, down plotted negative).
    yaxis_max/yaxis_min are reused as the energy (x-axis) window here.
    """
    x_scale,sys_name=draw_ados(eigenval_filename, dos_file, pos_file, kpoint_filename,yaxis_max,yaxis_min)
    E_fermi = get_fermi_from_doscar(dos_file)
    sum_dos,p_dos=read_doscar(dos_file)
    [latt,latt_vec,atom,S,sys_name]=read_poscar(pos_file)
    # re-read the DOSCAR header: s1[0] = number of atoms, s[2] = NEDOS
    # (the original leaked this file handle)
    with open(dos_file) as f:
        line=f.readline()
        s1=np.array(line.split())
        for ii in range(5):
            line=f.readline()
        s=np.array(line.split())
    npdos=np.size(p_dos,2)
    if npdos==10 or npdos==17:
        # --- non spin-polarised projected DOS ---
        atom_len=int(len(atom)/2)
        element_dos_up = np.zeros((int(s[2]),atom_len ))
        sh=np.shape(p_dos)
        p_dos2=np.zeros((sh[0],sh[1],sh[2]-1))
        # atom counts per element; seq[i] = [first, last) atom index of element i
        atom_num=atom[atom_len:]
        seq = np.zeros((len(atom_num),2))
        seq[0,:]= [0, atom_num[0]]
        # shift energies so the Fermi level sits at 0
        sum_dos[:,0]=sum_dos[:,0]-E_fermi
        plt.figure(dpi=300)
        for ik in range(1,len(atom_num)):
            seq[ik,:]=[atom_num[ik-1],sum(atom_num[0:ik+1])]
        # drop the energy column from each per-atom block
        for kk in range(int(s1[0])):
            p_dos2[kk,:,:]=np.delete(p_dos[kk,:,:],0,axis=1)
        for ii in range(atom_len):
            element_dos_up[:, ii]=(p_dos2[int(seq[ii,0]):int(seq[ii,1])].sum(axis=2).sum(axis=0))
            plt.plot(sum_dos[:,0], element_dos_up[:, ii],label=str(atom[ii]),linestyle='--')
        plt.plot(sum_dos[:,0],sum_dos[:,1],label='Total',lw=3)
        plt.legend(loc=0,ncol=1)
        plt.axvline(0, color='r', linestyle='--', lw=1)
        x_max=yaxis_max
        x_min=yaxis_min
        x_major_locator=MultipleLocator(1)
        plt.gca().xaxis.set_major_locator(x_major_locator)
        plt.xlim(x_min,x_max)
        plt.ylim(0,)
        plt.title('PDOS of '+sys_name,y=0.94,x=0.6)
    elif npdos in (19, 33):
        # --- spin-polarised projected DOS ---
        # BUG FIX: this was `npdos==19 or 33`, which is always truthy, so the
        # spin-polarised branch also ran for every unrecognised column count.
        atom_len=int(len(atom)/2)
        element_dos_up = np.zeros((int(s[2]),atom_len ))
        element_dos_down = np.zeros((int(s[2]),atom_len ))
        sh=np.shape(p_dos)
        p_dos2=np.zeros((sh[0],sh[1],sh[2]-1))
        atom_num=atom[atom_len:]
        seq = np.zeros((len(atom_num),2))
        seq[0,:]= [0, atom_num[0]]
        plt.figure(dpi=300)
        for ik in range(1,len(atom_num)):
            seq[ik,:]=[atom_num[ik-1],sum(atom_num[0:ik+1])]
        sum_dos[:,0]=sum_dos[:,0]-E_fermi
        for kk in range(int(s1[0])):
            p_dos2[kk,:,:]=np.delete(p_dos[kk,:,:],0,axis=1)
        for ii in range(atom_len):
            # even columns are spin-up, odd columns spin-down
            element_dos_up[:, ii]=(p_dos2[int(seq[ii,0]):int(seq[ii,1]),:,0::2].sum(axis=2).sum(axis=0))
            element_dos_down[:, ii]=(p_dos2[int(seq[ii,0]):int(seq[ii,1]),:,1::2].sum(axis=2).sum(axis=0))
            plt.plot(sum_dos[:,0], element_dos_up[:, ii],label=str(atom[ii])+'_up',linestyle='--')
            plt.plot(sum_dos[:,0], -1*element_dos_down[:, ii],label=str(atom[ii])+'_down',linestyle='--')
        plt.plot(sum_dos[:,0],sum_dos[:,1],label='Total_up',lw=3)
        plt.plot(sum_dos[:,0],-1*sum_dos[:,2],label='Total_down',lw=3)
        plt.legend(loc=0,ncol=1)
        plt.axvline(0, color='r', linestyle='--', lw=1)
        x_max=yaxis_max
        x_min=yaxis_min
        x_major_locator=MultipleLocator(1)
        plt.gca().xaxis.set_major_locator(x_major_locator)
        plt.xlim(x_min,x_max)
        plt.title('PDOS of '+sys_name,y=0.94,x=0.6)
    plt.savefig('Pdos.jpg')
def draw_band_pdos(eigenval_filename, dos_file, pos_file, kpoint_filename,yaxis_max,yaxis_min):
    """Plot the band structure (left panel) next to the projected DOS drawn
    sideways with a shared energy axis (right panel); saves Band-pdos.jpg.

    NOTE(review): only the non spin-polarised case (ispin_type == 1) gets a
    band panel here, and only the 10/17-column PDOS layout is handled —
    confirm whether spin-polarised input is expected by callers.
    """
    yaxis_max=yaxis_max
    yaxis_min=yaxis_min
    engval,kpoints,ispin_type=read_eigenval(eigenval_filename)
    E_fermi = get_fermi_from_doscar(dos_file)
    # shift eigenvalues so the Fermi level sits at 0
    engval=engval-E_fermi
    latt_rec=read_recip(pos_file)
    latt,latt_vec,atom,S,sys_name=read_poscar(pos_file)
    hsp,hsp_label,node=read_high_sym_point(kpoint_filename)
    # cumulative k-path length -> extra column on kpoints
    temp=[]
    for ii in range(1,kpoints.shape[0]):
        temp.append(np.linalg.norm((kpoints[ii-1]-kpoints[ii])*latt_rec))
    temp.insert(0,0)
    temp=np.cumsum(temp)
    temp=temp.reshape(kpoints.shape[0],1)
    kpoints=np.append(kpoints,temp,axis=1)
    # first/last k-point of each segment = the high-symmetry points
    aa=[]
    for ii in range(1,int(hsp.shape[0]/2)+1):
        aa.append(kpoints[node*(ii-1),:])
        aa.append(kpoints[node*ii-1,:])
    new_array = np.array(aa)
    x_scale=[]
    x_scale=np.array(list(set([tuple(x) for x in new_array])))
    # deduplicate high-symmetry points by coordinate sum, keeping the
    # 1-based position of each first occurrence
    coordskeys2=[]
    coordskeys3=[]
    new_array2=list(new_array)
    ii=0
    for aa in new_array:
        bb=np.sum(aa)
        ii=ii+1
        if bb not in coordskeys2:
            coordskeys2.append(bb)
            coordskeys3.append(ii)
    coordskeys5=[x-1 for x in coordskeys3] # 0-based indices of unique points
    coordskeys5=np.array(coordskeys5)
    coordskeys6=[]
    for ii in range(int(len(coordskeys5))):
        coordskeys6.append(tuple(hsp_label[coordskeys5[ii]]))
    coef = np.array(coordskeys6).flatten()
    kk=[]
    for bb in coef:
        kk.append( re.findall(r'\b\w', bb))
    hsp_label2=np.array(kk).flatten() # k-point tick labels
    if ispin_type==1:
        # left panel: band structure
        plt.figure(dpi=300)
        plt.subplot(121)
        energy=np.zeros([engval.shape[0],int(engval.shape[1]/2)])
        for ii in range(energy.shape[0]):
            for jj in range(energy.shape[1]):
                energy[ii][jj]=engval[ii][int(2*jj)]
        # print(coordskeys2,coordskeys5)
        plot_band(kpoints,x_scale,hsp_label2,E_fermi,sys_name,energy,'r',yaxis_max,yaxis_min)
    # right panel: PDOS rotated so energy runs along the y axis
    plt.subplot(122)
    x_scale,sys_name=draw_ados(eigenval_filename, dos_file, pos_file, kpoint_filename,yaxis_max,yaxis_min)
    E_fermi = get_fermi_from_doscar(dos_file)
    sum_dos,p_dos=read_doscar(dos_file)
    [latt,latt_vec,atom,S,sys_name]=read_poscar(pos_file)
    # re-read the DOSCAR header: s1[0] = atom count, s[2] = NEDOS
    f=open(dos_file)
    line=f.readline()
    s1=np.array(line.split())
    for ii in range(5):
        line=f.readline()
    s=np.array(line.split())
    npdos=np.size(p_dos,2)
    atom_len=int(len(atom)/2)
    element_dos_up = np.zeros((int(s[2]),atom_len ))
    sh=np.shape(p_dos)
    p_dos2=np.zeros((sh[0],sh[1],sh[2]-1))
    # atom counts per element; seq[i] = [first, last) atom index of element i
    atom_num=[]
    atom_num=atom[atom_len:]
    seq = np.zeros((len(atom_num),2))
    seq[0,:]= [0, atom_num[0]]
    sum_dos[:,0]=sum_dos[:,0]-E_fermi
    for ik in range(1,len(atom_num)):
        seq[ik,:]=[atom_num[ik-1],sum(atom_num[0:ik+1])]
    # drop the energy column from each per-atom block
    for kk in range(int(s1[0])):
        p_dos2[kk,:,:]=np.delete(p_dos[kk,:,:],0,axis=1)
    for ii in range(atom_len):
        element_dos_up[:, ii]=(p_dos2[int(seq[ii,0]):int(seq[ii,1])].sum(axis=2).sum(axis=0))
        # note swapped axes: DOS on x, energy on y
        plt.plot(element_dos_up[:, ii],sum_dos[:,0], label=str(atom[ii]),linestyle='--')
    plt.plot(sum_dos[:,1],sum_dos[:,0],label='Total',lw=3)
    plt.legend(loc=0,ncol=1)
    coordskeys7=np.array(sorted(list(x_scale[:,3])))
    plt.axhline(0, color='r', linestyle='--', lw=1)
    y_max=yaxis_max
    y_min=yaxis_min
    y_major_locator=MultipleLocator(1)
    plt.gca().yaxis.set_major_locator(y_major_locator)
    plt.xlim(0,)
    plt.ylim(y_min,y_max)
    plt.yticks([])
    plt.title('PDOS of '+sys_name,y=0.94,x=0.8)
    plt.savefig('Band-pdos.jpg')
#pos_file='POSCAR'
#eigenval_filename='EIGENVAL'
#dos_file='DOSCAR'
#kpoint_filename='KPOINTS2'
#engval,kpoints=read_eigenval(eigenval_filename)
#E_fermi = get_fermi_from_doscar(dos_file)
#if len(engval.shape)==2:
# energy_up=engval
# latt_rec=read_recip(pos_file)
# latt,latt_vec,atom,S,sys_name=read_poscar(pos_file)
# temp=[]
# for ii in range(1,kpoints.shape[0]):
# temp.append(np.linalg.norm((kpoints[ii-1]-kpoints[ii])*latt_rec))
# temp.insert(0,0)
# temp=np.cumsum(temp)
# temp=temp.reshape(kpoints.shape[0],1)
# kpoints=np.append(kpoints,temp,axis=1)
## print(kpoints)
# hsp,hsp_label,node=read_high_sym_point(kpoint_filename)
# energy=np.zeros([engval.shape[0],int(engval.shape[1]/2)])
# for ii in range(energy.shape[0]):
# for jj in range(energy.shape[1]):
# energy[ii][jj]=engval[ii][int(2*jj)]
# pl |
import sys
sys.path.append('../500_common')
import lib
import lib_ss
# images = lib.get_images("../500_common/data/result.html")
# Collect the page images via lib_ss.main with a local Chrome profile path —
# presumably a signed-in browser session; confirm against lib_ss.
soup = lib_ss.main("/Users/nakamurasatoru/git/d_genji/genji_curation/src/500_common/Chrome31", "Profile 3")
images = lib.get_images_by_soup(soup)
# Target IIIF collection plus the crop areas and page count to post.
collectionUrl = "https://utda.github.io/genji/iiif/ndl-8943312/top.json"
areas = ["4000,600,3000,4300", "950,600,3000,4300"]
countMax = 10
# token = lib.get_token("../token.yml")
# NOTE(review): a live (short-lived) Firebase ID token is hardcoded below —
# prefer the lib.get_token(...) call above and keep credentials out of the repo.
token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IjA4MGU0NWJlNGIzMTE4MzA5M2RhNzUyYmIyZGU5Y2RjYTNlNmU4ZTciLCJ0eXAiOiJKV1QifQ.eyJuYW1lIjoi5Lit5p2R6KaaIiwicGljdHVyZSI6Imh0dHBzOi8vbGgzLmdvb2dsZXVzZXJjb250ZW50LmNvbS8tWHQ1NENUT1pEdVEvQUFBQUFBQUFBQUkvQUFBQUFBQUFBQUEvQUFLV0pKTjg3RWs3MVZqeTZyWTNpeTh6bmFFR0FqeFlpdy9waG90by5qcGciLCJpc3MiOiJodHRwczovL3NlY3VyZXRva2VuLmdvb2dsZS5jb20vY29kaC04MTA0MSIsImF1ZCI6ImNvZGgtODEwNDEiLCJhdXRoX3RpbWUiOjE2MDkyNTYxMjMsInVzZXJfaWQiOiJvZ2Z0UkpaeGxDZzZIRDZMelNPWGZ4ZlBXYUEzIiwic3ViIjoib2dmdFJKWnhsQ2c2SEQ2THpTT1hmeGZQV2FBMyIsImlhdCI6MTYwOTMzNTUwMSwiZXhwIjoxNjA5MzM5MTAxLCJlbWFpbCI6InNhLnRvcnUuYTQxNjIzQGdtYWlsLmNvbSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJmaXJlYmFzZSI6eyJpZGVudGl0aWVzIjp7Imdvb2dsZS5jb20iOlsiMTA0ODEzNDEzMzM0OTI0ODM4NDQzIl0sImVtYWlsIjpbInNhLnRvcnUuYTQxNjIzQGdtYWlsLmNvbSJdfSwic2lnbl9pbl9wcm92aWRlciI6Imdvb2dsZS5jb20ifX0.Y-WMBswcY4lRel0ybz-We3xqK_CAy5vc3IiIDAvr79YjXK7Otv0XPcOMqJEjgOozVMo7I1UYK6FWlaQhy46VQ9bP4_9IDEsqezVhH_uHu5Mogs5oz0T6fsiBbf1tuPqkaTQpIbPu5silB2LIxR6TD-4DTamX7xiUaF_AKwTRgh83Cacce0Y6qPUze_G2Ne-2AI7vjDgTJ5gkiYzqTD7BsrJX5fMe2zfGnE2AjAULMRZpbzkETV8BVxR-wfHzn0E0ypvAXBhqHxrgHae76Cro8foqyQfWdPCh1krV_dbkK9-StAzfpQaHTU4USr0jF5RGc7zb4x-MLK4qPCX6kV1avg"
lib.post(collectionUrl, areas, countMax, token, images, "Collection")
import numpy as np
#----------------------- Function -----------------------#
def suppress(boxes, overlapThresh=0.5):
    '''
    suppress(boxes[, overlapThresh]) -> pick

    Non-maximum suppression: keep the best-scoring box of each cluster of
    mutually overlapping detections.

    @param boxes : array of [x, y, w, h, score] rows
    @param overlapThresh : overlap ratio above which a box is discarded
    @return the surviving boxes (float array).
    '''
    # nothing to do for an empty input
    if len(boxes) == 0:
        return boxes
    # work in floats so the overlap division below is exact
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    widths = boxes[:, 2]
    heights = boxes[:, 3]
    scores = boxes[:, 4].astype('uint8')
    x2 = x1 + widths
    y2 = y1 + heights
    areas = widths * heights
    # ascending by score, so the best remaining candidate is always last
    order = np.argsort(scores)
    keep = []
    while len(order) > 0:
        tail = len(order) - 1
        best = order[tail]
        keep.append(best)
        # intersection of the best box with every other remaining box
        ix1 = np.maximum(x1[best], x1[order[:tail]])
        iy1 = np.maximum(y1[best], y1[order[:tail]])
        ix2 = np.minimum(x2[best], x2[order[:tail]])
        iy2 = np.minimum(y2[best], y2[order[:tail]])
        iw = np.maximum(0, ix2 - ix1 + 1)
        ih = np.maximum(0, iy2 - iy1 + 1)
        # intersection relative to each remaining box's own area
        ratio = (iw * ih) / areas[order[:tail]]
        # drop the picked box and everything overlapping it too much
        order = np.delete(
            order, np.concatenate(([tail], np.where(ratio > overlapThresh)[0])))
    return boxes[keep]
if __name__ == '__main__':
    # Demo: run NMS over three heavily overlapping hand-made boxes and show
    # the raw detections (left) and the suppressed result (right) side by side.
    import cv2
    src = cv2.imread('./train/eli_walk3.png')
    canvas = src.copy()
    canvasNMS = src.copy()
    canvas_all = np.zeros((src.shape[0], 2*src.shape[1], 3), dtype='uint8')
    # each row is [x, y, w, h, score]
    boxes = [[ 90, 45, 85, 150, 3],
             [ 95, 50, 85, 150, 2],
             [ 85, 38, 80, 145, 5]]
    boxes = np.asarray(boxes, dtype=np.int32)
    for box in boxes:
        x, y, w, h = box[:4]
        cv2.rectangle(canvas, (int(x), int(y)), (int(x+w), int(y+h)), (0, 255, 0), 2)
    boxes = suppress(boxes)
    for box in boxes:
        x, y, w, h = box[:4]
        cv2.rectangle(canvasNMS, (int(x), int(y)), (int(x+w), int(y+h)), (0, 255, 0), 2)
    canvas_all[:src.shape[0], :src.shape[1]] = canvas
    canvas_all[:src.shape[0], src.shape[1]:] = canvasNMS
    cv2.imshow('canvas_all', canvas_all)
    # cv2.imwrite('./pedestrian_NMS.jpg', canvas_all)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
from fighters.Fighter import Fighter
# Courtney the PC!
class Courtney(Fighter):
    """Courtney, a playable character (PC) fighter."""
    def __init__(self):
        # BUG FIX: the display name was misspelled "Coutney".
        # The numeric stats are forwarded positionally to Fighter.__init__ —
        # presumably hp/attack/etc.; confirm against the Fighter base class.
        super().__init__("Courtney", 175, 20, 5, 7, 15)
|
from _typeshed import Incomplete
# Type-stub signature (typeshed style) for networkx's random_clustered_graph:
# builds a random graph from a joint independent-edge/triangle degree
# sequence.  The `...` body is intentional — stubs carry no implementation.
def random_clustered_graph(
    joint_degree_sequence,
    create_using: Incomplete | None = None,
    seed: Incomplete | None = None,
): ...
|
# From clang/bindings/python/cindex/test
# This file provides common utility functions for the test suite.
#
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
from collections.abc import Iterable
import logging
import re
from ctypeslib.codegen import typedesc
log = logging.getLogger('utils')
def get_tu(source, lang='c', all_warnings=False, flags=None):
    """Obtain a translation unit from in-memory source and a language.

    The unit is parsed from an unsaved file "memory_input.<ext>", where
    <ext> matches the requested language.  Supported languages are
    {c, cpp, objc}; the default is C.

    all_warnings enables -Wall -Wextra; flags are extra compiler args.
    """
    args = [] if flags is None else list(flags)
    if lang == 'c':
        name = 'memory_input.c'
    elif lang == 'cpp':
        name = 'memory_input.cpp'
        args.append('-std=c++11')
    elif lang == 'objc':
        name = 'memory_input.m'
    else:
        raise Exception('Unknown language: %s' % lang)
    if all_warnings:
        args += ['-Wall', '-Wextra']
    return TranslationUnit.from_source(name, args, unsaved_files=[(name, source)])
def get_cursor(source, spelling):
    """Depth-first search for the first cursor with the given spelling.

    source may be a TranslationUnit or a Cursor.  Returns None when no
    matching cursor exists.
    """
    if isinstance(source, Cursor):
        children = source.get_children()
    else:
        # assume a TranslationUnit
        children = source.cursor.get_children()
    for child in children:
        if child.spelling == spelling:
            return child
        # recurse into this child's subtree
        found = get_cursor(child, spelling)
        if found is not None:
            return found
    return None
def get_cursors(source, spelling):
    """Collect every cursor (depth-first) whose spelling matches.

    source may be a TranslationUnit or a Cursor.  Returns an empty list
    when nothing matches.
    """
    if isinstance(source, Cursor):
        children = source.get_children()
    else:
        # assume a TranslationUnit
        children = source.cursor.get_children()
    matches = []
    for child in children:
        if child.spelling == spelling:
            matches.append(child)
        # recurse into this child's subtree
        matches.extend(get_cursors(child, spelling))
    return matches
def decorator(dec):
    """Meta-decorator: make dec preserve the wrapped function's metadata.

    Equivalent to applying functools.wraps by hand, both to the function
    dec wraps and to dec itself.
    """
    def new_decorator(fn):
        wrapped = dec(fn)
        wrapped.__name__ = fn.__name__
        wrapped.__doc__ = fn.__doc__
        wrapped.__dict__.update(fn.__dict__)
        return wrapped
    new_decorator.__name__ = dec.__name__
    new_decorator.__doc__ = dec.__doc__
    new_decorator.__dict__.update(dec.__dict__)
    return new_decorator
@decorator
def log_entity(func):
    """Decorator for codegen visitors: debug-log the entity being visited.

    Expects func(self, cursor, ...); the display name comes from
    self.get_unique_name(cursor), falling back to the semantic parent's
    displayname when that name is empty.
    """
    def fn(*args, **kwargs):
        name = args[0].get_unique_name(args[1])
        if name == '':
            parent = args[1].semantic_parent
            if parent:
                name = 'child of %s' % parent.displayname
        log.debug("%s: displayname:'%s'", func.__name__, name)
        return func(*args, **kwargs)
    return fn
class ADict(dict):
    """Dict whose keys are also readable as attributes (d.key == d['key'])."""
    def __getattr__(self, name):
        # __getattr__ is only consulted after normal attribute lookup
        # fails, so real dict methods are never shadowed.
        if name in self:
            return self[name]
        raise AttributeError(name)
# Matches C float literals: optional sign, the usual digit/decimal/exponent
# forms, and an optional f/F/l/L suffix.  Group 1 captures the numeric part
# without the suffix.
_c_literal_regex = re.compile(
    r"^([+-]?((\d+(e|E)[+-]?\d+)|(\d+(\.\d*)?((e|E)[+-]?\d+)?)|(\.\d+((e|E)[+-]?\d+)?)))(f|F|l|L)?$"
)

def from_c_float_literal(value):
    """Return the numeric part of a C float literal, or None.

    value may be a string or an iterable of strings (e.g. adjacent
    preprocessor tokens), which is joined before matching.
    """
    if not isinstance(value, str) and isinstance(value, Iterable):
        if all(isinstance(part, str) for part in value):
            value = "".join(value)
    if not isinstance(value, str):
        return None
    match = _c_literal_regex.match(value)
    return match.group(1) if match else None
def contains_undefined_identifier(macro):
    """Return True when the macro body is, or contains, an UndefinedIdentifier."""
    body = macro.body
    # the whole body is undefined
    if isinstance(body, typedesc.UndefinedIdentifier):
        return True
    def _scan(items):
        # recursively look for an UndefinedIdentifier in nested lists
        for item in items:
            if isinstance(item, typedesc.UndefinedIdentifier):
                return True
            if isinstance(item, list) and _scan(item):
                return True
        return False
    # or one of its items is undefined
    if isinstance(body, list):
        return _scan(body)
    return False
def token_is_string(token):
    """Return True when token looks like a quoted string literal.

    Tokens shorter than 2 characters cannot hold both delimiters.
    """
    if not isinstance(token, Iterable) or len(token) < 2:
        return False
    first, last = token[0], token[-1]
    return first in ("'", '"') and first == last
def body_is_all_string_tokens(macro_body):
    """Return True when macro_body is a list made solely of string tokens.

    Anything that is not a list (including a bare string) yields False.
    """
    if not isinstance(macro_body, list):
        return False
    return all(token_is_string(tok) for tok in macro_body)
# Public API of this helper module.  (ADict, decorator, token_is_string and
# the macro helpers are internal — NOTE(review): confirm they are meant to
# stay unexported.)
__all__ = [
    'get_cursor',
    'get_cursors',
    'get_tu',
    'from_c_float_literal',
    'log_entity'
]
|
# Read a 3x3 matrix from the user, then report: the sum of the even values,
# the sum of the 3rd column, and the largest value of the 2nd row.
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somapares = somacoluna = 0
mai = None  # largest value seen so far in row index 1
for l in range(0, 3):
    for c in range(0, 3):
        matriz[l][c] = int(input(f'Digite um valores para [{l}/{c}]: '))
        if matriz[l][c] % 2 == 0:
            somapares += matriz[l][c]
        # BUG FIX: the original detected the 3rd column / 2nd row by comparing
        # *values* (e.g. matriz[l][c] == matriz[l][2]), which double-counts
        # whenever equal values appear elsewhere.  Use the indices directly.
        if c == 2:
            somacoluna += matriz[l][c]
        if l == 1 and (mai is None or matriz[l][c] > mai):
            mai = matriz[l][c]
print('=-'*30)
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]}]', end='')
    print()
print('=-'*30)
print(f'A soma dos valores pares é: {somapares}')
print(f'A soma dos valores da 3ª coluna é: {somacoluna}')
print(f'O maior valore da segunda linha é: {mai}')
# Print the most frequent non-space character of the input line
# (ties go to the character that appears first).
st = input()
counts = {}
for ch in st:
    if ch != ' ':
        # single O(n) pass instead of list.count per character (was O(n^2));
        # also avoids shadowing the builtin max()
        counts[ch] = counts.get(ch, 0) + 1
best_char = 0  # the original printed 0 for empty / all-space input; keep that
best_count = 0
for ch, cnt in counts.items():
    if cnt > best_count:
        best_count = cnt
        best_char = ch
print(str(best_char))
|
def getMachineCode(param):
    """Translate an assembly mnemonic, addressing-mode name or register name
    into its machine-code bit string.

    Raises Exception("Not valid operation", param) for unrecognised input.
    """
    encoding = {
        # two-operand ALU operations (4-bit opcode)
        'MOV': '0001', 'ADD': '0010', 'ADC': '0011', 'SUB': '0100',
        'SBC': '0101', 'AND': '0110', 'OR': '0111', 'XNOR': '1000',
        'CMP': '1001',
        # single-operand operations (10-bit opcode)
        'INC': '0000001010', 'DEC': '0000001011', 'CLR': '0000001100',
        'INV': '0000001101', 'LSR': '0000001110', 'ROR': '0000001111',
        'RRC': '0000010000', 'ASR': '0000010001', 'LSL': '0000010010',
        'ROL': '0000010011', 'RLC': '0000010100', 'JSR': '0000010101',
        # branches (8-bit opcode)
        'BR': '00001000', 'BEQ': '00001001', 'BNE': '00001010',
        'BLO': '00001011', 'BLS': '00001100', 'BHI': '00001101',
        'BHS': '00001110',
        # zero-operand instructions (full 16-bit words)
        'HLT': '0000000001000000', 'NOP': '0000000010000000',
        'RTS': '0000000011000000', 'IRET': '0000000100000000',
        # addressing modes (3 bits)
        'register': '000', 'autoincrement': '001', 'autodecrement': '010',
        'indexed': '011', 'indirectregister': '100',
        'indirectautoincrement': '101', 'indirectautodecrement': '110',
        'indirectindexed': '111',
        # register numbers (3 bits)
        'R0': '000', 'R1': '001', 'R2': '010', 'R3': '011',
        'R4': '100', 'R5': '101', 'R6': '110', 'R7': '111',
    }
    code = encoding.get(param)
    if not code:
        raise Exception("Not valid operation", param)
    return code
# def getMachineCode(param):
# return{
# 'MOV':'MOV',
# 'ADD':'ADD',
# 'ADC':'ADC',
# 'SUB':'SUB',
# 'SBC':'SBC',
# 'AND':'AND',
# 'OR' :'OR',
# 'XNOR':'XNOR',
# 'CMP':'CMP',
# 'INC':'INC',
# 'DEC':'DEC',
# 'CLR':'CLR',
# 'INV':'INV',
# 'LSR':'LSR',
# 'ROR':'ROR',
# 'RRC':'0000000110',
# 'ASR':'0000000111',
# 'LSL':'0000001000',
# 'ROL':'0000001001',
# 'RLC':'0000001010',
# 'JSR':'opcodeofjsr',
# 'BR' :'BR',
# 'BEQ':'0000001001',
# 'BNE':'0000001010',
# 'BLO':'0000001011',
# 'BHI':'0000001100',
# 'BHS':'0000001110',
# 'HLT':'HLT',
# 'NOP':'0000001100',
# 'register':'register',
# 'autoincrement':'autoincrement',
# 'autodecrement':'autodecrement',
# 'indexed':'indexed',
# 'indirectregister':'indirectregister',
# 'indirectautoincrement':'indirectautoincrement',
# 'indirectautodecrement':'indirectautodecrement',
# 'indirectindexed':'indirectindexed',
# 'R0':'R0',
# 'R1':'R1',
# 'R2':'R2',
# 'R3':'R3',
# 'R4':'R4',
# 'R5':'R5',
# 'R6':'R6',
# 'R7':'R7'
# }.get(param) |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import importlib.resources
import subprocess
import sys
from dataclasses import dataclass
from enum import Enum
from pathlib import Path, PurePath
from textwrap import dedent
from typing import Iterable, List, cast
from unittest.mock import Mock
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.subsystems import setuptools
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
EntryPoint,
PexBinary,
PexLayout,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestTarget,
)
from pants.backend.python.util_rules import pex_from_targets, pex_test_utils
from pants.backend.python.util_rules.pex import (
OptionalPex,
OptionalPexRequest,
Pex,
PexPlatforms,
PexRequest,
ReqStrings,
)
from pants.backend.python.util_rules.pex_from_targets import (
ChosenPythonResolve,
ChosenPythonResolveRequest,
GlobalRequirementConstraints,
PexFromTargetsRequest,
_determine_requirements_for_pex_from_targets,
_PexRequirementsRequest,
_RepositoryPexRequest,
)
from pants.backend.python.util_rules.pex_requirements import (
EntireLockfile,
LoadedLockfile,
LoadedLockfileRequest,
Lockfile,
PexRequirements,
Resolve,
)
from pants.backend.python.util_rules.pex_test_utils import get_all_data
from pants.build_graph.address import Address
from pants.core.goals.generate_lockfiles import NoCompatibleResolveException
from pants.core.target_types import FileTarget, ResourceTarget
from pants.engine.addresses import Addresses
from pants.engine.fs import Snapshot
from pants.testutil.option_util import create_subsystem
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import MockGet, QueryRule, engine_error, run_rule_with_mocks
from pants.util.contextutil import pushd
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import softwrap
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
    """Build a PythonRuleRunner wired with the pex-building and packaging rules
    (plus the query rules) that the tests in this module exercise."""
    test_rules = [
        *package_pex_binary.rules(),
        *pex_test_utils.rules(),
        *pex_from_targets.rules(),
        *target_types_rules.rules(),
        QueryRule(PexRequest, (PexFromTargetsRequest,)),
        QueryRule(ReqStrings, (PexRequirements,)),
        QueryRule(GlobalRequirementConstraints, ()),
        QueryRule(ChosenPythonResolve, [ChosenPythonResolveRequest]),
        *setuptools.rules(),
    ]
    known_target_types = [
        PexBinary,
        PythonSourcesGeneratorTarget,
        PythonRequirementTarget,
        PythonSourceTarget,
        PythonTestTarget,
        FileTarget,
        ResourceTarget,
    ]
    return PythonRuleRunner(rules=test_rules, target_types=known_target_types)
@pytest.mark.skip(reason="TODO(#15824)")
@pytest.mark.no_error_if_skipped
def test_choose_compatible_resolve(rule_runner: PythonRuleRunner) -> None:
    """ChosenPythonResolve picks the resolve shared by a target closure, and raises
    NoCompatibleResolveException when the closure mixes incompatible resolves."""
    def create_target_files(
        directory: str, *, req_resolve: str, source_resolve: str, test_resolve: str
    ) -> dict[str, str]:
        # One self-contained BUILD file: a source, a requirement, and a test
        # depending on both, each pinned to the given resolve.
        return {
            f"{directory}/BUILD": dedent(
                f"""\
                python_source(name="dep", source="dep.py", resolve="{source_resolve}")
                python_requirement(
                    name="req", requirements=[], resolve="{req_resolve}"
                )
                python_test(
                    name="test",
                    source="tests.py",
                    dependencies=[":dep", ":req"],
                    resolve="{test_resolve}",
                )
                """
            ),
            f"{directory}/tests.py": "",
            f"{directory}/dep.py": "",
        }
    rule_runner.set_options(
        ["--python-resolves={'a': '', 'b': ''}", "--python-enable-resolves"], env_inherit={"PATH"}
    )
    rule_runner.write_files(
        {
            # Note that each of these BUILD files are entirely self-contained.
            **create_target_files("valid", req_resolve="a", source_resolve="a", test_resolve="a"),
            **create_target_files(
                "invalid",
                req_resolve="a",
                source_resolve="a",
                test_resolve="b",
            ),
        }
    )
    def choose_resolve(addresses: list[Address]) -> str:
        # Helper: ask the engine which resolve the given addresses agree on.
        return rule_runner.request(
            ChosenPythonResolve, [ChosenPythonResolveRequest(Addresses(addresses))]
        ).name
    assert choose_resolve([Address("valid", target_name="test")]) == "a"
    assert choose_resolve([Address("valid", target_name="dep")]) == "a"
    assert choose_resolve([Address("valid", target_name="req")]) == "a"
    with engine_error(NoCompatibleResolveException, contains="its dependencies are not compatible"):
        choose_resolve([Address("invalid", target_name="test")])
    with engine_error(NoCompatibleResolveException, contains="its dependencies are not compatible"):
        choose_resolve([Address("invalid", target_name="dep")])
    with engine_error(
        NoCompatibleResolveException, contains="input targets did not have a resolve"
    ):
        choose_resolve(
            [Address("invalid", target_name="req"), Address("invalid", target_name="dep")]
        )
def test_determine_requirements_for_pex_from_targets() -> None:
    """Exhaustively unit-test `_determine_requirements_for_pex_from_targets`.

    Covers every requirement mode (pex lockfile, non-pex lockfile, constraints
    with/without resolve_all, no locks) crossed with `internal_only`,
    `platforms`, and `run_against_entire_lockfile`, asserting which
    `PexRequirements` and repository pexes the rule yields. All engine
    interactions are supplied by `run_rule_with_mocks`.

    Fix: the CONSTRAINTS_NO_RESOLVE_ALL section previously passed
    `_platforms=platforms`, silently reusing the leftover loop variable from an
    earlier `for platforms in (True, False)` loop (always False by that point).
    It now passes `_platforms=False` explicitly; the following loop covers the
    platforms dimension.
    """

    class RequirementMode(Enum):
        PEX_LOCKFILE = 1
        NON_PEX_LOCKFILE = 2
        # Note that enable_resolves is mutually exclusive with requirement_constraints.
        CONSTRAINTS_RESOLVE_ALL = 3
        CONSTRAINTS_NO_RESOLVE_ALL = 4
        NO_LOCKS = 5

    req_strings = ["req1", "req2"]
    global_requirement_constraints = ["constraint1", "constraint2"]

    # Fixtures for a PEX-native lockfile resolve...
    resolve__pex = Resolve("pex", False)
    loaded_lockfile__pex = Mock(is_pex_native=True, as_constraints_strings=None)
    chosen_resolve__pex = Mock(lockfile=Mock())
    chosen_resolve__pex.name = "pex"  # name has special meaning in Mock(), so must set it here.
    # ...and for a non-PEX-native (e.g. Poetry-generated) lockfile resolve.
    resolve__not_pex = Resolve("not_pex", False)
    loaded_lockfile__not_pex = Mock(is_pex_native=False, as_constraints_strings=req_strings)
    chosen_resolve__not_pex = Mock(lockfile=Mock())
    chosen_resolve__not_pex.name = "not_pex"  # ditto.

    repository_pex_request__lockfile = Mock()
    repository_pex_request__constraints = Mock()
    repository_pex__lockfile = Mock()
    repository_pex__constraints = Mock()

    def assert_setup(
        _mode: RequirementMode,
        *,
        _internal_only: bool,
        _platforms: bool,
        include_requirements: bool = True,
        run_against_entire_lockfile: bool = False,
        expected_reqs: PexRequirements = PexRequirements(),
        expected_pexes: Iterable[Pex] = (),
    ) -> None:
        # Run the rule under one configuration and assert its two outputs.
        lockfile_used = _mode in (RequirementMode.PEX_LOCKFILE, RequirementMode.NON_PEX_LOCKFILE)
        requirement_constraints_used = _mode in (
            RequirementMode.CONSTRAINTS_RESOLVE_ALL,
            RequirementMode.CONSTRAINTS_NO_RESOLVE_ALL,
        )
        python_setup = create_subsystem(
            PythonSetup,
            enable_resolves=lockfile_used,
            run_against_entire_lockfile=run_against_entire_lockfile,
            resolve_all_constraints=_mode != RequirementMode.CONSTRAINTS_NO_RESOLVE_ALL,
            requirement_constraints="foo.constraints" if requirement_constraints_used else None,
        )
        pex_from_targets_request = PexFromTargetsRequest(
            Addresses(),
            output_filename="foo",
            include_requirements=include_requirements,
            platforms=PexPlatforms(["foo"] if _platforms else []),
            internal_only=_internal_only,
        )
        resolved_pex_requirements = PexRequirements(
            req_strings,
            constraints_strings=(
                global_requirement_constraints if requirement_constraints_used else ()
            ),
        )
        # NB: Requesting platforms turns off first creating a repository.pex.
        if lockfile_used and not _platforms:
            mock_repository_pex_request = OptionalPexRequest(
                maybe_pex_request=repository_pex_request__lockfile
            )
            mock_repository_pex = OptionalPex(maybe_pex=repository_pex__lockfile)
        elif _mode == RequirementMode.CONSTRAINTS_RESOLVE_ALL and not _platforms:
            mock_repository_pex_request = OptionalPexRequest(
                maybe_pex_request=repository_pex_request__constraints
            )
            mock_repository_pex = OptionalPex(maybe_pex=repository_pex__constraints)
        else:
            mock_repository_pex_request = OptionalPexRequest(maybe_pex_request=None)
            mock_repository_pex = OptionalPex(maybe_pex=None)
        reqs, pexes = run_rule_with_mocks(
            _determine_requirements_for_pex_from_targets,
            rule_args=[pex_from_targets_request, python_setup],
            mock_gets=[
                MockGet(
                    output_type=PexRequirements,
                    input_types=(_PexRequirementsRequest,),
                    mock=lambda _: resolved_pex_requirements,
                ),
                MockGet(
                    output_type=ChosenPythonResolve,
                    input_types=(ChosenPythonResolveRequest,),
                    mock=lambda _: (
                        chosen_resolve__pex
                        if _mode == RequirementMode.PEX_LOCKFILE
                        else chosen_resolve__not_pex
                    ),
                ),
                MockGet(
                    output_type=Lockfile,
                    input_types=(Resolve,),
                    mock=lambda _: (
                        resolve__pex if _mode == RequirementMode.PEX_LOCKFILE else resolve__not_pex
                    ),
                ),
                MockGet(
                    output_type=LoadedLockfile,
                    input_types=(LoadedLockfileRequest,),
                    mock=lambda _: (
                        loaded_lockfile__pex
                        if _mode == RequirementMode.PEX_LOCKFILE
                        else loaded_lockfile__not_pex
                    ),
                ),
                MockGet(
                    output_type=OptionalPexRequest,
                    input_types=(_RepositoryPexRequest,),
                    mock=lambda _: mock_repository_pex_request,
                ),
                MockGet(
                    output_type=OptionalPex,
                    input_types=(OptionalPexRequest,),
                    mock=lambda _: mock_repository_pex,
                ),
            ],
        )
        assert expected_reqs == reqs
        assert expected_pexes == pexes

    # If include_requirements is False, no matter what, early return.
    for mode in RequirementMode:
        assert_setup(
            mode,
            include_requirements=False,
            _internal_only=False,
            _platforms=False,
            # Nothing is expected
        )

    # Pex lockfiles: usually, return PexRequirements with from_superset as the resolve.
    # Except for when run_against_entire_lockfile is set and it's an internal_only Pex, then
    # return PexRequest.
    for internal_only in (True, False):
        assert_setup(
            RequirementMode.PEX_LOCKFILE,
            _internal_only=internal_only,
            _platforms=False,
            expected_reqs=PexRequirements(req_strings, from_superset=resolve__pex),
        )
    assert_setup(
        RequirementMode.PEX_LOCKFILE,
        _internal_only=False,
        _platforms=True,
        expected_reqs=PexRequirements(req_strings, from_superset=resolve__pex),
    )
    for platforms in (True, False):
        assert_setup(
            RequirementMode.PEX_LOCKFILE,
            _internal_only=False,
            run_against_entire_lockfile=True,
            _platforms=platforms,
            expected_reqs=PexRequirements(req_strings, from_superset=resolve__pex),
        )
    assert_setup(
        RequirementMode.PEX_LOCKFILE,
        _internal_only=True,
        run_against_entire_lockfile=True,
        _platforms=False,
        expected_reqs=repository_pex_request__lockfile.requirements,
        expected_pexes=[repository_pex__lockfile],
    )

    # Non-Pex lockfiles: except for when run_against_entire_lockfile is applicable, return
    # PexRequirements with from_superset as the lockfile repository Pex and constraint_strings as
    # the lockfile's requirements.
    for internal_only in (False, True):
        assert_setup(
            RequirementMode.NON_PEX_LOCKFILE,
            _internal_only=internal_only,
            _platforms=False,
            expected_reqs=PexRequirements(
                req_strings, constraints_strings=req_strings, from_superset=repository_pex__lockfile
            ),
        )
    assert_setup(
        RequirementMode.NON_PEX_LOCKFILE,
        _internal_only=False,
        _platforms=True,
        expected_reqs=PexRequirements(
            req_strings, constraints_strings=req_strings, from_superset=None
        ),
    )
    assert_setup(
        RequirementMode.NON_PEX_LOCKFILE,
        _internal_only=False,
        run_against_entire_lockfile=True,
        _platforms=False,
        expected_reqs=PexRequirements(
            req_strings, constraints_strings=req_strings, from_superset=repository_pex__lockfile
        ),
    )
    assert_setup(
        RequirementMode.NON_PEX_LOCKFILE,
        _internal_only=False,
        run_against_entire_lockfile=True,
        _platforms=True,
        expected_reqs=PexRequirements(
            req_strings, constraints_strings=req_strings, from_superset=None
        ),
    )
    assert_setup(
        RequirementMode.NON_PEX_LOCKFILE,
        _internal_only=True,
        run_against_entire_lockfile=True,
        _platforms=False,
        expected_reqs=repository_pex_request__lockfile.requirements,
        expected_pexes=[repository_pex__lockfile],
    )

    # Constraints file with resolve_all_constraints: except for when run_against_entire_lockfile
    # is applicable, return PexRequirements with from_superset as the constraints repository Pex
    # and constraint_strings as the global constraints.
    for internal_only in (False, True):
        assert_setup(
            RequirementMode.CONSTRAINTS_RESOLVE_ALL,
            _internal_only=internal_only,
            _platforms=False,
            expected_reqs=PexRequirements(
                req_strings,
                constraints_strings=global_requirement_constraints,
                from_superset=repository_pex__constraints,
            ),
        )
    assert_setup(
        RequirementMode.CONSTRAINTS_RESOLVE_ALL,
        _internal_only=False,
        _platforms=True,
        expected_reqs=PexRequirements(
            req_strings, constraints_strings=global_requirement_constraints, from_superset=None
        ),
    )
    assert_setup(
        RequirementMode.CONSTRAINTS_RESOLVE_ALL,
        _internal_only=False,
        run_against_entire_lockfile=True,
        _platforms=False,
        expected_reqs=PexRequirements(
            req_strings,
            constraints_strings=global_requirement_constraints,
            from_superset=repository_pex__constraints,
        ),
    )
    assert_setup(
        RequirementMode.CONSTRAINTS_RESOLVE_ALL,
        _internal_only=False,
        run_against_entire_lockfile=True,
        _platforms=True,
        expected_reqs=PexRequirements(
            req_strings, constraints_strings=global_requirement_constraints, from_superset=None
        ),
    )
    assert_setup(
        RequirementMode.CONSTRAINTS_RESOLVE_ALL,
        _internal_only=True,
        run_against_entire_lockfile=True,
        _platforms=False,
        expected_reqs=repository_pex_request__constraints.requirements,
        expected_pexes=[repository_pex__constraints],
    )

    # Constraints file without resolve_all_constraints: always PexRequirements with
    # constraint_strings as the global constraints.
    for internal_only in (True, False):
        assert_setup(
            RequirementMode.CONSTRAINTS_NO_RESOLVE_ALL,
            _internal_only=internal_only,
            # Fix: was `_platforms=platforms`, which reused the stale loop
            # variable from an earlier loop (always False here). The next loop
            # below covers the platforms dimension for this mode.
            _platforms=False,
            expected_reqs=PexRequirements(
                req_strings, constraints_strings=global_requirement_constraints
            ),
        )
    for platforms in (True, False):
        assert_setup(
            RequirementMode.CONSTRAINTS_NO_RESOLVE_ALL,
            _internal_only=False,
            _platforms=platforms,
            expected_reqs=PexRequirements(
                req_strings, constraints_strings=global_requirement_constraints
            ),
        )

    # No constraints and lockfiles: return PexRequirements without modification.
    for internal_only in (True, False):
        assert_setup(
            RequirementMode.NO_LOCKS,
            _internal_only=internal_only,
            _platforms=False,
            expected_reqs=PexRequirements(req_strings),
        )
    assert_setup(
        RequirementMode.NO_LOCKS,
        _internal_only=False,
        _platforms=True,
        expected_reqs=PexRequirements(req_strings),
    )
@dataclass(frozen=True)
class Project:
    """A (name, version) pair describing a synthetic distribution to build in tests."""
    name: str
    version: str
build_deps = ["setuptools==54.1.2", "wheel==0.36.2"]
setuptools_poetry_lockfile = r"""
# This lockfile was autogenerated by Pants. To regenerate, run:
#
# ./pants generate-lockfiles --resolve=setuptools
#
# --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
# {
# "version": 2,
# "valid_for_interpreter_constraints": [
# "CPython>=3.7"
# ],
# "generated_with_requirements": [
# "setuptools==54.1.2"
# ]
# }
# --- END PANTS LOCKFILE METADATA ---
setuptools==54.1.2; python_version >= "3.6" \
--hash=sha256:dd20743f36b93cbb8724f4d2ccd970dce8b6e6e823a13aa7e5751bb4e674c20b \
--hash=sha256:ebd0148faf627b569c8d2a1b20f5d3b09c873f12739d71c7ee88f037d5be82ff
"""
def create_project_dir(workdir: Path, project: Project) -> PurePath:
    """Write a minimal setuptools project for *project* under workdir/projects/<name>."""
    target_dir = workdir / "projects" / project.name
    target_dir.mkdir(parents=True)
    pyproject_toml = dedent(
        f"""\
        [build-system]
        requires = {build_deps}
        build-backend = "setuptools.build_meta"
        """
    )
    setup_cfg = dedent(
        f"""\
        [metadata]
        name = {project.name}
        version = {project.version}
        """
    )
    (target_dir / "pyproject.toml").write_text(pyproject_toml)
    (target_dir / "setup.cfg").write_text(setup_cfg)
    return target_dir
def create_dists(workdir: Path, project: Project, *projects: Project) -> PurePath:
    """Build every given project into one PEX, then extract a find-links repo from it.

    Returns the path of the find-links directory containing the built dists.
    """
    project_dirs = [create_project_dir(workdir, proj) for proj in (project, *projects)]
    pex = workdir / "pex"
    # Build a single PEX containing all the projects plus the build deps.
    build_args = [
        sys.executable,
        "-m",
        "pex",
        *project_dirs,
        *build_deps,
        "--include-tools",
        "-o",
        pex,
    ]
    subprocess.run(args=build_args, check=True)
    # Extract the PEX's contents into a local find-links repository.
    find_links = workdir / "find-links"
    extract_args = [
        sys.executable,
        "-m",
        "pex.tools",
        pex,
        "repository",
        "extract",
        "--find-links",
        find_links,
    ]
    subprocess.run(args=extract_args, check=True)
    return find_links
def requirements(rule_runner: PythonRuleRunner, pex: Pex) -> list[str]:
    """Return the requirement strings recorded in *pex*'s PEX-INFO metadata."""
    pex_data = get_all_data(rule_runner, pex)
    return cast(List[str], pex_data.info["requirements"])
def test_constraints_validation(tmp_path: Path, rule_runner: PythonRuleRunner) -> None:
    """End-to-end test of constraints-file handling: requirement subsetting with
    resolve_all_constraints=False, repository-pex creation with it True, and the
    error when it is True without a constraints file."""
    sdists = tmp_path / "sdists"
    sdists.mkdir()
    find_links = create_dists(
        sdists,
        Project("Foo-Bar", "1.0.0"),
        Project("Bar", "5.5.5"),
        Project("baz", "2.2.2"),
        Project("QUX", "3.4.5"),
    )
    # Turn the project dir into a git repo, so it can be cloned.
    gitdir = tmp_path / "git"
    gitdir.mkdir()
    foorl_dir = create_project_dir(gitdir, Project("foorl", "9.8.7"))
    with pushd(str(foorl_dir)):
        subprocess.check_call(["git", "init"])
        subprocess.check_call(["git", "config", "user.name", "dummy"])
        subprocess.check_call(["git", "config", "user.email", "dummy@dummy.com"])
        subprocess.check_call(["git", "add", "--all"])
        subprocess.check_call(["git", "commit", "-m", "initial commit"])
        subprocess.check_call(["git", "branch", "9.8.7"])
    # This string won't parse as a Requirement if it doesn't contain a netloc,
    # so we explicitly mention localhost.
    url_req = f"foorl@ git+file://localhost{foorl_dir.as_posix()}@9.8.7"
    rule_runner.write_files(
        {
            "util.py": "",
            "app.py": "",
            "BUILD": dedent(
                f"""
                python_requirement(name="foo", requirements=["foo-bar>=0.1.2"])
                python_requirement(name="bar", requirements=["bar==5.5.5"])
                python_requirement(name="baz", requirements=["baz"])
                python_requirement(name="foorl", requirements=["{url_req}"])
                python_sources(name="util", sources=["util.py"], dependencies=[":foo", ":bar"])
                python_sources(name="app", sources=["app.py"], dependencies=[":util", ":baz", ":foorl"])
                """
            ),
            "constraints1.txt": dedent(
                """
                # Comment.
                --find-links=https://duckduckgo.com
                Foo._-BAR==1.0.0  # Inline comment.
                bar==5.5.5
                baz==2.2.2
                qux==3.4.5
                # Note that pip does not allow URL requirements in constraints files,
                # so there is no mention of foorl here.
                """
            ),
        }
    )
    # Create and parse the constraints file.
    constraints1_filename = "constraints1.txt"
    rule_runner.set_options(
        [f"--python-requirement-constraints={constraints1_filename}"], env_inherit={"PATH"}
    )
    constraints1_strings = [str(c) for c in rule_runner.request(GlobalRequirementConstraints, [])]
    def get_pex_request(
        constraints_file: str | None,
        resolve_all_constraints: bool | None,
        *,
        _additional_args: Iterable[str] = (),
        _additional_lockfile_args: Iterable[str] = (),
    ) -> PexRequest:
        # Helper: build a PexRequest for //:app under the given constraints config.
        args = ["--backend-packages=pants.backend.python"]
        request = PexFromTargetsRequest(
            [Address("", target_name="app")],
            output_filename="demo.pex",
            internal_only=True,
            additional_args=_additional_args,
            additional_lockfile_args=_additional_lockfile_args,
        )
        if resolve_all_constraints is not None:
            args.append(f"--python-resolve-all-constraints={resolve_all_constraints!r}")
        if constraints_file:
            args.append(f"--python-requirement-constraints={constraints_file}")
        args.append("--python-repos-indexes=[]")
        args.append(f"--python-repos-repos={find_links}")
        rule_runner.set_options(args, env_inherit={"PATH"})
        pex_request = rule_runner.request(PexRequest, [request])
        assert OrderedSet(_additional_args).issubset(OrderedSet(pex_request.additional_args))
        return pex_request
    additional_args = ["--strip-pex-env"]
    additional_lockfile_args = ["--no-strip-pex-env"]
    pex_req1 = get_pex_request(constraints1_filename, resolve_all_constraints=False)
    assert isinstance(pex_req1.requirements, PexRequirements)
    assert pex_req1.requirements.constraints_strings == FrozenOrderedSet(constraints1_strings)
    req_strings_obj1 = rule_runner.request(ReqStrings, (pex_req1.requirements,))
    assert req_strings_obj1.req_strings == ("bar==5.5.5", "baz", "foo-bar>=0.1.2", url_req)
    pex_req2 = get_pex_request(
        constraints1_filename,
        resolve_all_constraints=True,
        _additional_args=additional_args,
        _additional_lockfile_args=additional_lockfile_args,
    )
    pex_req2_reqs = pex_req2.requirements
    assert isinstance(pex_req2_reqs, PexRequirements)
    req_strings_obj2 = rule_runner.request(ReqStrings, (pex_req2_reqs,))
    assert req_strings_obj2.req_strings == ("bar==5.5.5", "baz", "foo-bar>=0.1.2", url_req)
    assert isinstance(pex_req2_reqs.from_superset, Pex)
    repository_pex = pex_req2_reqs.from_superset
    # The lockfile args (not the pex args) should govern the repository pex.
    assert not get_all_data(rule_runner, repository_pex).info["strip_pex_env"]
    assert ["Foo._-BAR==1.0.0", "bar==5.5.5", "baz==2.2.2", "foorl", "qux==3.4.5"] == requirements(
        rule_runner, repository_pex
    )
    with engine_error(
        ValueError,
        contains=softwrap(
            """
            `[python].resolve_all_constraints` is enabled, so
            `[python].requirement_constraints` must also be set.
            """
        ),
    ):
        get_pex_request(None, resolve_all_constraints=True)
    # Shouldn't error, as we don't explicitly set --resolve-all-constraints.
    get_pex_request(None, resolve_all_constraints=None)
@pytest.mark.parametrize("include_requirements", [False, True])
def test_exclude_requirements(
    include_requirements: bool, tmp_path: Path, rule_runner: PythonRuleRunner
) -> None:
    """include_requirements=False drops the closure's requirements from the PexRequest."""
    sdists = tmp_path / "sdists"
    sdists.mkdir()
    find_links = create_dists(sdists, Project("baz", "2.2.2"))
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """
                python_requirement(name="baz", requirements=["foo==1.2.3"])
                python_sources(name="app", sources=["app.py"], dependencies=[":baz"])
                """
            ),
            "constraints.txt": dedent("foo==1.2.3"),
            "app.py": "",
        }
    )
    rule_runner.set_options(
        [
            "--backend-packages=pants.backend.python",
            "--python-repos-indexes=[]",
            f"--python-repos-repos={find_links}",
        ],
        env_inherit={"PATH"},
    )
    request = PexFromTargetsRequest(
        [Address("", target_name="app")],
        output_filename="demo.pex",
        internal_only=True,
        include_requirements=include_requirements,
    )
    pex_request = rule_runner.request(PexRequest, [request])
    assert isinstance(pex_request.requirements, PexRequirements)
    assert len(pex_request.requirements.req_strings_or_addrs) == (1 if include_requirements else 0)
@pytest.mark.parametrize("include_sources", [False, True])
def test_exclude_sources(include_sources: bool, rule_runner: PythonRuleRunner) -> None:
    """include_source_files=False leaves the PexRequest's sources digest empty."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """
                python_sources(name="app", sources=["app.py"])
                """
            ),
            "app.py": "",
        }
    )
    rule_runner.set_options(
        [
            "--backend-packages=pants.backend.python",
            "--python-repos-indexes=[]",
        ],
        env_inherit={"PATH"},
    )
    request = PexFromTargetsRequest(
        [Address("", target_name="app")],
        output_filename="demo.pex",
        internal_only=True,
        include_source_files=include_sources,
    )
    pex_request = rule_runner.request(PexRequest, [request])
    snapshot = rule_runner.request(Snapshot, [pex_request.sources])
    assert len(snapshot.files) == (1 if include_sources else 0)
def test_include_sources_without_transitive_package_sources(rule_runner: PythonRuleRunner) -> None:
    """Sources reachable only through a packaged target (pex_binary) are excluded."""
    rule_runner.write_files(
        {
            "src/app/BUILD": dedent(
                """
                python_sources(
                    name="app",
                    sources=["app.py"],
                    dependencies=["//src/dep:pkg"],
                )
                """
            ),
            "src/app/app.py": "",
            "src/dep/BUILD": dedent(
                # This test requires a package that has a standard dependencies field.
                # 'pex_binary' has a dependencies field; 'archive' does not.
                """
                pex_binary(name="pkg", dependencies=[":dep"])
                python_sources(name="dep", sources=["dep.py"])
                """
            ),
            "src/dep/dep.py": "",
        }
    )
    rule_runner.set_options(
        [
            "--backend-packages=pants.backend.python",
            "--python-repos-indexes=[]",
        ],
        env_inherit={"PATH"},
    )
    request = PexFromTargetsRequest(
        [Address("src/app", target_name="app")],
        output_filename="demo.pex",
        internal_only=True,
        include_source_files=True,
    )
    pex_request = rule_runner.request(PexRequest, [request])
    snapshot = rule_runner.request(Snapshot, [pex_request.sources])
    # the packaged transitive dep is excluded
    assert snapshot.files == ("app/app.py",)
@pytest.mark.parametrize("enable_resolves", [False, True])
def test_cross_platform_pex_disables_subsetting(
    rule_runner: PythonRuleRunner, enable_resolves: bool
) -> None:
    """When platforms are requested, requirement subsetting is disabled: the request
    carries the addresses directly rather than a subset against a repository pex."""
    # See https://github.com/pantsbuild/pants/issues/12222.
    lockfile = "3rdparty/python/default.lock"
    constraints = ["foo==1.0", "bar==1.0"]
    rule_runner.write_files(
        {
            lockfile: "\n".join(constraints),
            "a.py": "",
            "BUILD": dedent(
                """
                python_requirement(name="foo",requirements=["foo"])
                python_requirement(name="bar",requirements=["bar"])
                python_sources(name="lib",dependencies=[":foo"])
                """
            ),
        }
    )
    if enable_resolves:
        options = [
            "--python-enable-resolves",
            # NB: Because this is a synthetic lockfile without a header.
            "--python-invalid-lockfile-behavior=ignore",
        ]
    else:
        options = [
            f"--python-requirement-constraints={lockfile}",
            "--python-resolve-all-constraints",
        ]
    rule_runner.set_options(options, env_inherit={"PATH"})
    request = PexFromTargetsRequest(
        [Address("", target_name="lib")],
        output_filename="demo.pex",
        internal_only=False,
        platforms=PexPlatforms(["some-platform-x86_64"]),
    )
    result = rule_runner.request(PexRequest, [request])
    assert result.requirements == PexRequirements(
        request.addresses,
        constraints_strings=constraints,
        description_of_origin="//:lib",
    )
class ResolveMode(Enum):
    """The three ways a repo can pin its third-party requirements in these tests."""
    # Legacy constraints file with [python].resolve_all_constraints enabled.
    resolve_all_constraints = "resolve_all_constraints"
    # A non-PEX-native lockfile (Poetry-generated or hand-written).
    poetry_or_manual = "poetry_or_manual"
    # A PEX-native lockfile.
    pex = "pex"
@pytest.mark.parametrize(
    "mode,internal_only,run_against_entire_lockfile",
    [(m, io, rael) for m in ResolveMode for io in [True, False] for rael in [True, False]],
)
def test_lockfile_requirements_selection(
    rule_runner: PythonRuleRunner,
    mode: ResolveMode,
    internal_only: bool,
    run_against_entire_lockfile: bool,
) -> None:
    """For each resolve mode, check which requirements representation (PexRequirements
    vs EntireLockfile) and which from_superset the resulting PexRequest carries."""
    mode_files: dict[str, str | bytes] = {
        "a.py": "",
        "BUILD": dedent(
            """
            python_sources(name="lib", dependencies=[":setuptools"])
            python_requirement(name="setuptools", requirements=["setuptools"])
            """
        ),
    }
    if mode == ResolveMode.resolve_all_constraints:
        mode_files.update({"constraints.txt": "setuptools==54.1.2"})
    elif mode == ResolveMode.poetry_or_manual:
        mode_files.update({"3rdparty/python/default.lock": setuptools_poetry_lockfile})
    else:
        assert mode == ResolveMode.pex
        # NOTE(review): importlib.resources.read_binary is deprecated since
        # Python 3.11; consider importlib.resources.files(...).read_bytes().
        lock_content = importlib.resources.read_binary(
            "pants.backend.python.subsystems", "setuptools.lock"
        )
        mode_files.update({"3rdparty/python/default.lock": lock_content})
    rule_runner.write_files(mode_files)
    if mode == ResolveMode.resolve_all_constraints:
        options = [
            "--python-requirement-constraints=constraints.txt",
        ]
    else:
        # NB: It doesn't matter what the lockfile generator is set to: only what is actually on disk.
        options = [
            "--python-enable-resolves",
            "--python-default-resolve=myresolve",
            "--python-resolves={'myresolve':'3rdparty/python/default.lock'}",
        ]
    if run_against_entire_lockfile:
        options.append("--python-run-against-entire-lockfile")
    request = PexFromTargetsRequest(
        [Address("", target_name="lib")],
        output_filename="demo.pex",
        internal_only=internal_only,
        main=EntryPoint("a"),
    )
    rule_runner.set_options(options, env_inherit={"PATH"})
    result = rule_runner.request(PexRequest, [request])
    assert result.layout == (PexLayout.PACKED if internal_only else PexLayout.ZIPAPP)
    assert result.main == EntryPoint("a")
    if run_against_entire_lockfile and internal_only:
        # With `run_against_entire_lockfile`, all internal requests result in the full set
        # of requirements, but that is encoded slightly differently per mode.
        if mode == ResolveMode.resolve_all_constraints:
            # NB: The use of the legacy constraints file with `resolve_all_constraints` requires parsing
            # and manipulation of the constraints, and needs to include transitive deps (unlike other
            # lockfile requests). So it is emitted as `PexRequirements` rather than EntireLockfile.
            assert isinstance(result.requirements, PexRequirements)
            assert not result.requirements.from_superset
        else:
            assert mode in (ResolveMode.poetry_or_manual, ResolveMode.pex)
            assert isinstance(result.requirements, EntireLockfile)
    else:
        assert isinstance(result.requirements, PexRequirements)
        if mode in (ResolveMode.resolve_all_constraints, ResolveMode.poetry_or_manual):
            assert isinstance(result.requirements.from_superset, Pex)
            assert not get_all_data(rule_runner, result.requirements.from_superset).is_zipapp
        else:
            assert mode == ResolveMode.pex
            assert isinstance(result.requirements.from_superset, Resolve)
            assert result.requirements.from_superset.name == "myresolve"
def test_warn_about_files_targets(rule_runner: PythonRuleRunner, caplog) -> None:
    """warn_for_transitive_files_targets warns about `file` deps (which don't end up
    in the pex) but stays silent about `resource` deps (which do)."""
    rule_runner.write_files(
        {
            "app.py": "",
            "file.txt": "",
            "resource.txt": "",
            "BUILD": dedent(
                """
                file(name="file_target", source="file.txt")
                resource(name="resource_target", source="resource.txt")
                python_sources(name="app", dependencies=[":file_target", ":resource_target"])
                """
            ),
        }
    )
    rule_runner.request(
        PexRequest,
        [
            PexFromTargetsRequest(
                [Address("", target_name="app")],
                output_filename="app.pex",
                internal_only=True,
                warn_for_transitive_files_targets=True,
            )
        ],
    )
    assert "The target //:app (`python_source`) transitively depends on" in caplog.text
    # files are not fine:
    assert "//:file_target" in caplog.text
    # resources are fine:
    assert "resource_target" not in caplog.text
    assert "resource.txt" not in caplog.text
|
# -*- coding: utf-8 -*-
import os
import sys
import pathlib
from subprocess import check_output
import time, datetime
import urllib, requests
import json
from datetime import datetime, timedelta
from random import random
import pprint
import copy
import subprocess
from flask import Blueprint, render_template, request, jsonify, make_response, redirect, url_for, flash
from flask_login import login_required
from flask import send_file
from .system_info import *
from .forms import AdminConfigForm
from ..views.auth import auth
from ..models import AdminConfig
# from ..constantes import LOGGER_PATH
from .. import db
from .charts import *
from util import *
import config
# Blueprint grouping all admin pages under the /admin URL prefix.
admin_blueprint = Blueprint("admin", __name__, url_prefix='/admin')
@admin_blueprint.route('/chart', methods=['GET'])
@login_required
def chart():
    """Render the admin chart page."""
    # NOTE(review): `params` is not defined in this function. It presumably
    # comes from one of the star imports above (`.charts`?); otherwise this
    # raises NameError on every request (compare index(), which builds its own
    # local `params`). Confirm and make the source of `params` explicit.
    return render_template('admin/chart.html', params = params)
@admin_blueprint.route('/', methods=['GET'])
@login_required
def index():
    """Display System Info"""
    # Gather every dashboard section in one literal; insertion order drives
    # the display order in the template.
    params = {
        "network": {
            'ip lan eth': ip_lan_eth(),
            'ip lan wifi': ip_lan_wifi(),
            'hostname': hostname(),
        },
        "system": {
            'date': system_date(),
            'uptime': system_uptime(),
            'cpu_temp': cpu_temp(),
        },
        "Nexmo (SMS)": {
            'Balance': nexmo_balance(),
        },
        "disk_space": disk_space(),
        "supervisor": supervisor_status(),
        "db_size": db_size(),
    }
    return render_template('admin/admin.html', params=params)
def restart_supervisor_minute_phase_deamon():
    """Restart the `minute_phase` supervisor daemon so it reloads the new admin config.

    `ret` holds the output expected from a successful `supervisorctl restart`;
    if the actual command output differs, an error is logged to stderr.

    Fix: the failure check was inverted (`if ret == stdout` logged FAILED),
    i.e. it reported failure exactly when the restart succeeded. It now logs
    FAILED only when the output differs from the expected one.
    """
    cmd_prod = """sudo supervisorctl restart prod:minute_phase"""
    cmd_dev = """sudo supervisorctl restart dev:minute_phase_dev"""
    ret_prod = """prod:minute_phase: stopped
prod:minute_phase: started
"""
    ret_dev = """dev:minute_phase_dev: stopped
dev:minute_phase_dev: started
"""
    cmd = cmd_prod
    ret = ret_prod
    # NOTE(review): `app` is not imported explicitly here; presumably provided
    # by one of the star imports — confirm.
    if app.config['ENVNAME'] == 'Dev':
        cmd = cmd_dev
        ret = ret_dev
    stdout = subprocess.check_output(cmd, shell=True)
    stdout = stdout.decode('utf-8')
    sys.stdout.write('SUCCESS ' + cmd + '\n') and sys.stdout.flush()
    if ret != stdout:
        sys.stderr.write('FAILED ' + cmd + '\n') and sys.stderr.flush()
@admin_blueprint.route('/config', methods=['GET', 'POST'])
@login_required
def config():
    """Show (GET) and update (POST) the single AdminConfig row; on a successful
    update, restart the minute_phase daemon so it picks up the new values."""
    # NOTE(review): this view function shadows the `config` module imported at
    # the top of the file — within this module, `config` now refers to the view.
    admin_config = AdminConfig.query.first()
    if admin_config is None:
        # NOTE(review): `abort` is not in the explicit flask import list above;
        # presumably provided by a star import — confirm, else this is a NameError.
        abort(404)
    # NOTE(review): csrf_enabled=False disables CSRF protection on this form.
    form = AdminConfigForm(obj=admin_config, csrf_enabled=False)
    if request.method == 'POST':
        if form.validate():
            # admin_config.temp_chaudiere_failure = form.temp_chaudiere_failure.data
            form.populate_obj(admin_config)
            db.session.commit()
            # flash(u'updated', 'success')
            sys.stdout.write("Update admin config\n") and sys.stdout.flush()
            restart_supervisor_minute_phase_deamon()
            return render_template('admin/admin_config.html', form=form, success=True)
        else:
            pass
            # flash(u'Error in form', 'danger')
    return render_template('admin/admin_config.html', form=form)
@admin_blueprint.route('/log', defaults={'file': None}, methods=['GET'])
@admin_blueprint.route('/log/<string:file>', methods=['GET'])
@login_required
def log(file):
    """List all log files (*.log / *.err) in the configured LOG_PATH and, when
    `file` names one of them, display its content.

    :param file: filename selected from the listing, or None for the bare
        /log route (listing only).
    """
    log_path = pathlib.Path(app.config['LOG_PATH'])
    all_files = os.listdir(log_path)
    files = [name for name in all_files if name.endswith(('.log', '.err'))]
    content = ''
    # Only read files present in the listing; this also keeps arbitrary
    # URL-supplied paths from being opened.
    if file and (file in files):
        # Context manager ensures the handle is closed even if read() fails
        # (the original opened/closed by hand).
        with open(log_path / file, 'r') as stream:
            content = stream.read()
    # NOTE(review): files owned by root may raise PermissionError here; the
    # previously commented-out code chown'ed them to 'pi' first — confirm
    # whether that is still needed on the target host.
    return render_template('admin/admin_log.html', content=content,
                           active_file=file,
                           files=files)
@admin_blueprint.route('/log/download/<string:file>', methods=['GET'])
@login_required
def download_log(file):
    """Download a single log file from LOG_PATH as an attachment.

    Security: `file` comes straight from the URL, so only *.log / *.err names
    containing no path separator are served. Flask's <string:> converter
    already rejects '/', but we are explicit and also reject backslashes and
    the platform separator to rule out path traversal. Any other name falls
    through (returns None -> HTTP 500), matching the previous behavior for
    non-log names.
    """
    if (
        (file.endswith('.log') or file.endswith('.err'))
        and '/' not in file
        and '\\' not in file
        and os.sep not in file
    ):
        log_path = pathlib.Path(app.config['LOG_PATH'])
        return send_file(str(log_path / file), as_attachment=True)
|
def reverse(S):
    """Return a copy of the string *S* with its characters in reverse order.

    The original pushed every character onto a list used as a stack and then
    popped into `s += ...`, which is O(n^2) due to repeated string
    concatenation; joining the reversed sequence is linear and equivalent
    (popping a stack yields the characters in reverse).
    """
    return "".join(reversed(S))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.