blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
861619f37d3f45ca55feb13d85f1c0ec4990fcef | 52a3beeb07ad326115084a47a9e698efbaec054b | /horizon/.venv/bin/pyscss | 80baac9d659e74f232c739e2139c1b9408819faa | [
"Apache-2.0"
] | permissive | bopopescu/sample_scripts | 3dade0710ecdc8f9251dc60164747830f8de6877 | f9edce63c0a4d636f672702153662bd77bfd400d | refs/heads/master | 2022-11-17T19:19:34.210886 | 2018-06-11T04:14:27 | 2018-06-11T04:14:27 | 282,088,840 | 0 | 0 | null | 2020-07-24T00:57:31 | 2020-07-24T00:57:31 | null | UTF-8 | Python | false | false | 319 | #!/home/horizon/horizon/.venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyScss==1.3.5','console_scripts','pyscss'
# Auto-generated setuptools launcher: resolves the 'pyscss' console-script
# entry point pinned to pyScss 1.3.5 and exits with its return code.
__requires__ = 'pyScss==1.3.5'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyScss==1.3.5', 'console_scripts', 'pyscss')()
    )
| [
"Suhaib.Chishti@exponential.com"
] | Suhaib.Chishti@exponential.com | |
029601d73023044b3cbf50c8f580bd1ea21d97fb | 37e49d0866f2a3668333e438f89d91c5de181f3b | /singlepage2/singlepage2/urls.py | 1648b5a218ce4d5e55d0a83d6f4d2db94aa70a92 | [] | no_license | pireats-abhi/user-interface-js-django | f6146f128bdb522db4f2b08356b29d5736ffb173 | 56128bea7b1128974401c1ff190f89438dd9e077 | refs/heads/master | 2022-12-23T20:22:53.796265 | 2020-10-02T07:11:48 | 2020-10-02T07:11:48 | 300,529,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """singlepage2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# The admin UI lives under /admin/; every other URL is delegated to the
# 'singlepage' app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('singlepage.urls'))
]
| [
"pireats.abhi@gmail.com"
] | pireats.abhi@gmail.com |
99599038e3f80650a765e87ef0ff275f2dcf95a0 | 21d89e598ceae51ff889f3788c728b25cdbc9dbc | /critical_critiques/critical_critiques/views.py | 0f1633895ac0c2e01b1c57e268f1891ebcf1a1c6 | [
"MIT"
] | permissive | team-stroller/critical_critiques | 5723938f49dcfff07f6cfd914ae8202a4354b4d5 | 99f3b073abbfda3ad62ea01f72d39756a1e5b088 | refs/heads/master | 2016-08-08T03:20:21.607260 | 2013-05-11T17:00:08 | 2013-05-11T17:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django.contrib.auth import logout
from django.views.generic.base import RedirectView
from django.core.urlresolvers import reverse
class LogoutView(RedirectView):
    """Log the current user out, then redirect to the site root."""
    url = "/"
    def get(self, request, *args, **kwargs):
        # Terminate the session before letting RedirectView issue the redirect.
        logout(request)
        return super(LogoutView, self).get(request, *args, **kwargs)
| [
"mike@mike-burns.com"
] | mike@mike-burns.com |
d2313b4137772c00967dfbbe494417a8b971574d | 0379bae8cff218a039d79104c38cd1ad2b506919 | /vision/tests/unit/test_artworks.py | a7ccbafaf3bd3a73289faaea734d71912bb3628a | [] | no_license | ottozrq/Louvre | 0128673e05cdabbd1acc5499605a31232b110682 | 170d3421c821694c31b6fee49c99e97fe76f728e | refs/heads/master | 2022-07-14T04:21:42.390515 | 2022-07-03T19:50:38 | 2022-07-03T19:50:38 | 167,688,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | from tests import ApiClient, m, status
def test_get_artworks_artwork_id(cl: ApiClient, artwork_1):
    """GET /artworks/{id} returns the stored artwork unchanged."""
    assert m.Artwork.from_db(artwork_1) == m.Artwork.from_response(
        cl(f"/artworks/{artwork_1.artwork_id}")
    )
def test_get_artworks(cl: ApiClient, landmark_1, artwork_1):
    """Listing a landmark's artworks returns exactly the fixture artwork."""
    assert m.ArtworkCollection.from_response(
        cl(f"/landmarks/{landmark_1.landmark_id}/artworks")
    ).contents == [
        m.Artwork.from_db(artwork_1)
    ]
def test_post_artworks(cl: ApiClient, mocker, landmark_1):
    """POST under a landmark creates an artwork and echoes the full resource."""
    # Image-descriptor extraction is patched out so no model runs in tests.
    mocker.patch("utils.algo.get_image_descriptor")
    artwork = m.Artwork.from_response(
        cl(
            f"/landmarks/{landmark_1.landmark_id}/artworks",
            method="POST",
            data=m.ArtworkCreate(
                artwork_name={"en": "Art"},
                cover_image="art.jpg",
                description={"en": "This is Art"},
                extra={},
                geometry=m.GeometryElement(
                    coordinates=[1, 1], type=m.GeometryType.Point
                ),
            ),
        )
    )
    # The response must link back to the parent landmark and to itself.
    assert artwork == m.Artwork(
        cover_image="art.jpg",
        description={"en": "This is Art"},
        landmark=f"/landmarks/{landmark_1.landmark_id}",
        extra={},
        geometry=m.GeometryElement(coordinates=[1, 1], type=m.GeometryType.Point),
        artwork_name={"en": "Art"},
        self_link=f"/artworks/{artwork.artwork_id}",
        kind=m.Kind.artwork,
        artwork_id=artwork.artwork_id,
    )
def test_patch_artworks_artwork_id(cl: ApiClient, mocker, landmark_1, artwork_1):
    """PATCH merges translations/extra fields and replaces scalar fields."""
    mocker.patch("utils.algo.get_image_descriptor")
    artwork = m.Artwork.from_response(
        cl(
            f"/artworks/{artwork_1.artwork_id}",
            method="PATCH",
            data=m.ArtworkPatch(
                artwork_name={"fr": "Art fr"},
                cover_image="art_edit.jpg",
                description={"fr": "This is Art fr"},
                extra={"edit": "sth"},
                geometry=m.GeometryElement(
                    coordinates=[2, 2], type=m.GeometryType.Point
                ),
            ),
        )
    )
    # Name/description dictionaries are merged per-language, not replaced.
    # NOTE(review): artwork_rate=1 presumably comes from the artwork_1
    # fixture — confirm against conftest.
    assert artwork == m.Artwork(
        cover_image="art_edit.jpg",
        description={"en": "This is Art", "fr": "This is Art fr"},
        landmark=f"/landmarks/{landmark_1.landmark_id}",
        extra={"edit": "sth"},
        artwork_rate=1,
        geometry=m.GeometryElement(coordinates=[2, 2], type=m.GeometryType.Point),
        artwork_name={"en": "Art", "fr": "Art fr"},
        self_link=f"/artworks/{artwork.artwork_id}",
        kind=m.Kind.artwork,
        artwork_id=artwork.artwork_id,
    )
def test_delete_artwork_artwork_id(cl: ApiClient, artwork_1):
    """DELETE returns 204 No Content for an existing artwork."""
    cl(
        f"/artworks/{artwork_1.artwork_id}",
        method="DELETE",
        status=status.HTTP_204_NO_CONTENT,
    )
| [
"zhang_r@epita.fr"
] | zhang_r@epita.fr |
6a19fed144487fa7d519f1231fecc8f5b63bc650 | 830bcc71b7924094644f36a1e3f670086a8ea109 | /deep_cluster/triplets/triplets_gui.py | 7295591f07163a9076a557d3c766497efc442060 | [] | no_license | ysterin/deep_cluster | 550ec9b6a4266603cf275e5094b2d435426b0359 | e1d5f8d01332d3bcb9c7de75340a68e06e14e88e | refs/heads/master | 2023-06-13T08:48:25.566379 | 2021-07-11T17:53:15 | 2021-07-11T17:53:15 | 294,084,774 | 0 | 0 | null | 2020-12-22T17:47:53 | 2020-09-09T10:54:11 | Jupyter Notebook | UTF-8 | Python | false | false | 17,920 | py | import sys
# sys.path.append('..')
import os
import numpy as np
import cv2 as cv
from scipy import signal as sig
from collections import defaultdict
from pathlib import Path
from contextlib import contextmanager
from deep_cluster.dataloader import LandmarkDataset, SequenceDataset
from matplotlib import pyplot as plt
from collections import Counter
import torch
from torch.utils.data import ConcatDataset
import pickle
import re
import math
import time
import tkinter as tk
from PIL import ImageTk, Image
from threading import Thread, Event
from multiprocessing import Process
from deep_cluster.triplets.landmarks_video import Video, LandmarksVideo
from deep_cluster.triplets.sample_triplets import Segment, triplets_segments_gen, load_segments
import math
from contextlib import contextmanager
@contextmanager
def timethis(label):
    """Context manager that prints how long the wrapped block took.

    Prints "<label> took <seconds> seconds" when the block completes
    normally (nothing is printed if the block raises).
    """
    start = time.time()
    yield
    print(f"{label} took {time.time() - start} seconds")
def get_seg_clip(vid, seg, n_frames, fps=120):
    """Extract a clip of exactly ``n_frames`` frames for a segment.

    Frames are read from ``vid`` at a stride that downsamples ``vid.fps``
    to approximately ``fps``.  If more frames than requested come back,
    the surplus is trimmed symmetrically from both ends; if fewer, the
    clip is padded by repeating the first and last frames.
    """
    step = int(math.ceil(vid.fps / fps))
    frames = list(vid[seg.start_frame: seg.start_frame + seg.n_frames: step])
    if n_frames < len(frames):
        excess = len(frames) - n_frames
        head, tail = math.floor(excess / 2), math.ceil(excess / 2)
        frames = frames[head: -tail]
    shortfall = n_frames - len(frames)
    lead, trail = math.floor(shortfall / 2), math.ceil(shortfall / 2)
    frames = [frames[0]] * lead + frames + [frames[-1]] * trail
    return np.stack(frames)
def get_random_clips(vid, duration=0.5, max_n_clips=100, fps=60):
    """Yield up to ``max_n_clips`` triples of clips at random positions.

    Each yielded item is a list of three clips of ``duration`` seconds,
    sampled uniformly from the first 300k frame indices and downsampled
    from ``vid.fps`` to approximately ``fps``.
    """
    n_frames = int(duration * vid.fps)
    step = int(math.ceil(vid.fps / fps))
    for _ in range(max_n_clips):
        starts = np.random.randint(0, 3 * 10 ** 5, size=(3,))
        yield [vid[start: start + n_frames: step] for start in starts]
'''
A widget for displaying animation.
root: parent of widget
frames: list of frames to animate, each frame is a numpy array.
n_frames: number of frames to show in the animation - if less than the length of frames, discard frames from beginning
and end as needed. if more, pad with same frame in beginning and end.
'''
class Animation(tk.Canvas):
    """Tkinter canvas that loops over a fixed frame list on a background thread.

    Frames are trimmed or edge-padded so that exactly ``n_frames`` images are
    shown per cycle at roughly ``fps`` frames per second.
    """
    def __init__(self, root, frames, n_frames=None, fps=30, *args, **kwargs):
        # self.n_frames = len(frames)
        self.interval = 1 / fps  # seconds to sleep between frames
        self.root = root
        self.stop = Event()  # set by destroy() to terminate the animation loop
        if 'width' in kwargs:
            width = kwargs['width']
            height = kwargs['height']
        else:
            # Infer canvas size from the first frame, optionally rescaled.
            height, width, *_ = frames[0].shape
            if 'rescale' in kwargs:
                height, width = int(height * kwargs['rescale']), int(width * kwargs['rescale'])
        tk.Canvas.__init__(self, root, width=width, height=height, *args)
        self.n_frames = n_frames if n_frames else len(frames)
        if self.n_frames < len(frames):
            # Too many frames: drop the surplus symmetrically from both ends.
            n_frames_to_discard = len(frames) - self.n_frames
            n_frames_to_discard_beginning = math.floor(n_frames_to_discard / 2)
            n_frames_to_discard_end = math.ceil(n_frames_to_discard / 2)
            frames = frames[n_frames_to_discard_beginning: - n_frames_to_discard_end]
        self.images = [ImageTk.PhotoImage(Image.fromarray(frame).resize((width, height))) for frame in frames]
        # Too few frames: pad by repeating the first/last image so the cycle
        # length is exactly n_frames.
        n_pad = self.n_frames - len(frames)
        self.pad_beginning, self.pad_ending = math.floor(n_pad / 2), math.ceil(n_pad / 2)
        self.images = [self.images[0]] * self.pad_beginning + self.images + [self.images[-1]] * self.pad_ending
        self.pack()
        self.thread = Thread(target=self.animation)
        # self.thread.setDaemon(True)
        self.root.after(0, self.thread.start)
    def animation(self):
        """Frame loop run on ``self.thread``; exits once ``stop`` is set."""
        try:
            while not self.stop.is_set():
                for i in range(self.n_frames):
                    time.sleep(self.interval)
                    if self.stop.is_set():
                        return
                    self.create_image(0, 0, image=self.images[i], anchor='nw')
                    self.update()
        except tk.TclError as e:
            # The canvas can be torn down by Tk while the loop is drawing.
            print("[INFO] caught a RuntimeError")
    def destroy(self):
        # Drop image references and stop the worker thread before letting
        # Tk destroy the canvas widget.
        del self.images
        self.stop.set()
        self.thread.join(0.1)
        super().destroy()
class ClipsDisplay(tk.Frame):
    """Frame showing an anchor clip beside two candidate clips.

    Each candidate is paired with a radio button; ``choice_var`` holds the
    user's selection (1 = left/positive, 2 = right/negative, 0 = none).
    """
    # @profile
    def __init__(self, root, clips, fps=30, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.window = tk.Frame(self)
        # Use the mean clip length so all three animations cycle in sync.
        n_frames = int(np.mean([len(clip) for clip in clips]))
        self.anchor_anim = Animation(self, clips[0], fps=fps, n_frames=n_frames, rescale=1.5)#, width=100, height=240)
        self.anchor_anim.pack(side=tk.LEFT)
        pos_frame = tk.Frame(self.window)
        neg_frame = tk.Frame(self.window)
        choice_var = tk.IntVar(self)
        self.choice_var = choice_var
        radio_button_1 = tk.Radiobutton(pos_frame, var=choice_var, value=1)
        radio_button_2 = tk.Radiobutton(neg_frame, var=choice_var, value=2)
        self.pos_anim = Animation(pos_frame, clips[1], fps=fps, n_frames=n_frames, rescale=1.5)#, width=100, height=200)
        self.neg_anim = Animation(neg_frame, clips[2], fps=fps, n_frames=n_frames, rescale=1.5)#, width=100, height=200)
        self.pos_anim.pack()
        self.neg_anim.pack()
        radio_button_1.pack(side=tk.BOTTOM)
        radio_button_2.pack(side=tk.BOTTOM)
        pos_frame.pack(side=tk.LEFT)
        neg_frame.pack(side=tk.LEFT)
        self.window.pack()
        self.pack()
    def destroy(self):
        # Explicitly stop the three animation threads before frame teardown.
        self.anchor_anim.destroy()
        self.pos_anim.destroy()
        self.neg_anim.destroy()
        super().destroy()
import pandas as pd
class App(tk.Frame):
    """Main annotation GUI.

    Repeatedly samples three random, motion-filtered segments from ``video``
    (one anchor plus two candidates), shows them side by side, and records
    which candidate the user judged closer to the anchor.  Buffered
    selections are appended to ``save_file`` as CSV rows.
    """

    def __init__(self, root, video, encoded=None, save_file=None, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.root = root
        # Optional per-frame embedding array; when given, embedding distances
        # between the sampled clips are printed for reference.
        self.encoded = encoded
        self.saved_triplets = []  # rows buffered until the next save()
        self.video = video
        if not save_file:
            save_file = 'data/selected_triplets.csv'
        self.save_file = save_file
        self.i = 1  # 1-based counter of triplets shown so far
        self.display = tk.Frame()
        self.display.pack()
        self.next_button = tk.Button(self, command=self.next, text="NEXT")
        self.next_button.pack(side=tk.BOTTOM)
        self.ilabel = tk.Label(self, text=str(self.i), font=("Courier", 32))
        self.ilabel.pack(side=tk.LEFT)
        # These labels are created unconditionally (the previous guard
        # ``if self.encoded is not None or True:`` was always true) because
        # reload_display() rewrites their text on every iteration.
        self.dist_label1 = tk.Label(self, text='distance from anchor to positive:')
        self.dist_label1.pack(side=tk.BOTTOM)
        self.dist_label2 = tk.Label(self, text='distance from anchor to negative:')
        self.dist_label2.pack(side=tk.BOTTOM)
        self.dist_label3 = tk.Label(self, text='distance from negative to positive:')
        self.dist_label3.pack(side=tk.BOTTOM)
        self.bind('<Return>', lambda event: self.save())
        self.bind('<space>', self.next)
        self.bind('w', self.next)
        self.bind('q', lambda event: self.quit())
        self.focus_set()
        self.load_clips()
        self.reload_display()
        # Keyboard shortcuts mirroring the radio buttons: 1 = left clip,
        # 2 = right clip, 0 = no selection.
        self.bind('<Left>', lambda event: self.display.choice_var.set(1))
        self.bind('<Right>', lambda event: self.display.choice_var.set(2))
        self.bind('<Down>', lambda event: self.display.choice_var.set(0))
        self.bind('a', lambda event: self.display.choice_var.set(1))
        self.bind('d', lambda event: self.display.choice_var.set(2))
        self.bind('s', lambda event: self.display.choice_var.set(0))
        self.bind('e', lambda event: self.save())
        self.pack()

    def filter_segment(self, segment):
        """Return True when ``segment`` has confident landmarks and enough
        low-frequency motion energy to be worth showing to the annotator."""
        segment_df = self.video.landmarks.df.loc[segment].drop(['tail1', 'tail2', 'tail3'], axis=1, level=0)
        # Per-frame flag: 1 when every remaining landmark is confident.
        confidence = np.prod(segment_df.xs('likelihood', level=1, axis=1, drop_level=True) > 0.9, axis=1)
        if confidence.mean() < 0.8:
            return False
        data = segment_df.drop('likelihood', level=1, axis=1).values.astype(np.float32)
        ff, Pxx = sig.periodogram(data.T, fs=segment_df.attrs['fps'])
        # Mean spectral power over the 10 lowest frequency bins.
        energy = Pxx[:, :10].mean()
        if energy < 2e0:
            return False
        return True

    def sample_random_triplets(self, n_frames=60, fps=120):
        """Sample three random segments that pass filter_segment() and load
        their frames into ``self.clips``.

        Raises:
            Exception: if more than 1000 attempts fail the filter.
        """
        selected_segments = []
        n_tries = 0
        while len(selected_segments) < 3:
            n_tries += 1
            start_idx = np.random.randint(len(self.video) - self.video.fps)
            segment = slice(start_idx, start_idx + n_frames, int(math.ceil(self.video.fps / fps)))
            if self.filter_segment(segment):
                selected_segments.append(segment)
            if n_tries > 1000:
                raise Exception("too many tries to sample segments")
        self.segments = selected_segments
        if self.encoded is not None:
            # BUG FIX: this branch previously indexed with an undefined
            # ``random_idxs`` name (NameError whenever encodings were
            # supplied).  Map each segment's start frame onto the coarser
            # encoding timeline instead.
            stride = int(len(self.video) / len(self.encoded))
            self.clip_encodeings = [self.encoded[seg.start // stride] for seg in selected_segments]
        if hasattr(self, 'clips'):
            del self.clips
        self.clips = [self.video[seg] for seg in self.segments]

    def next(self, event=None):
        """Record the current choice and advance to a fresh triplet."""
        self.save_triplet()
        self.i += 1
        self.reload_display()

    def reload_display(self):
        """Rebuild the clip display for the current triplet, refresh the
        per-clip motion statistics, and pre-load the next triplet."""
        self.display.destroy()
        self.display = ClipsDisplay(self, self.clips, fps=60)
        self.display.pack(side=tk.TOP)
        if self.encoded is not None:
            dist1 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[1])
            dist2 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[2])
            dist3 = np.linalg.norm(self.clip_encodeings[2] - self.clip_encodeings[1])
            print(dist1, dist2, dist3)
        # Mean per-frame landmark displacement after low-pass filtering,
        # shown per clip as a rough "amount of motion" indicator.
        diffs = []
        for i in range(3):
            seg = self.segments[i]
            data = self.video.landmarks[seg].T
            filt = sig.butter(4, 3, btype='low', output='ba', fs=self.video.fps)
            filtered = sig.filtfilt(*filt, data).T
            diffs.append(np.linalg.norm(np.diff(filtered, axis=0), axis=-1).mean())
        self.dist_label1['text'] = f"anchor: {diffs[0]:.4f}"
        self.dist_label2['text'] = f"sample 1: {diffs[1]:.4f}"
        self.dist_label3['text'] = f"sample2: {diffs[2]:.4f}"
        self.load_clips()

    def save_triplet(self):
        """Buffer the current triplet and the user's radio-button choice."""
        self.saved_triplets.append({'video_file': self.video.video_file,
                                    'anchor': (self.segments[0].start, self.segments[0].stop),
                                    'sample1': (self.segments[1].start, self.segments[1].stop),
                                    'sample2': (self.segments[2].start, self.segments[2].stop),
                                    'selected': self.display.choice_var.get()})
        # Periodically persist so a crash loses at most 20 annotations.
        if self.i % 20 == 0:
            self.save()

    def load_clips(self):
        """Sample a new triplet and update the iteration counter label."""
        print("start load clips")
        self.sample_random_triplets()
        self.ilabel['text'] = self.i
        print("finish load clips")

    def save(self):
        """Append buffered selections to ``self.save_file`` and clear the buffer."""
        df = pd.DataFrame.from_records(self.saved_triplets)
        # NOTE(review): appending re-writes the CSV header on every save;
        # downstream readers must tolerate repeated header rows.
        mode = 'a' if os.path.exists(self.save_file) else 'w'
        df.to_csv(path_or_buf=self.save_file, mode=mode)
        self.saved_triplets = []

    def quit(self):
        """Flush pending selections and stop the Tk main loop."""
        print("quitting")
        self.save()
        self.root.quit()
def decode_seg_string(seg_string):
    """Parse a ``"(start, end)"`` string into an ``(int, int)`` tuple."""
    start_str, end_str = seg_string[1:-1].split(',')
    return (int(start_str), int(end_str))
class VerificationApp(tk.Frame):
    """GUI for re-reviewing triplets previously recorded in a dataframe.

    Walks ``df`` row by row, replays each stored triplet, and (when
    ``to_save`` is set) appends the reviewer's second 'verification'
    choice alongside the original selection.
    """

    def __init__(self, root, video, df, encoded=None, to_save=True, start_idx=-1, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.root = root
        self.df = df  # rows with 'anchor'/'sample1'/'sample2' segment strings
        self.to_save = to_save
        self.encoded = encoded  # optional per-frame embeddings for distances
        self.saved_triplets = []  # rows buffered until the next save()
        self.video = video
        # Incremented before first use in new_triplet(), so -1 starts at row 0.
        self.i = start_idx
        self.display = tk.Frame()
        self.display.pack()
        self.next_button = tk.Button(self, command=self.next, text="NEXT")
        self.next_button.pack(side=tk.BOTTOM)
        self.bind('<Return>', lambda event: self.save())
        self.bind('<space>', self.next)
        self.bind('w', self.next)
        self.bind('q', lambda event: self.quit())
        if self.encoded is not None:
            self.dist_label1 = tk.Label(self, text='distance from anchor to positive:')
            self.dist_label1.pack(side=tk.BOTTOM)
            self.dist_label2 = tk.Label(self, text='distance from anchor to negative:')
            self.dist_label2.pack(side=tk.BOTTOM)
            self.dist_label3 = tk.Label(self, text='distance from negative to positive:')
            self.dist_label3.pack(side=tk.BOTTOM)
        self.focus_set()
        self.load_clips()
        self.reload_display()
        # Keyboard shortcuts mirroring the radio buttons: 1 = left clip,
        # 2 = right clip, 0 = no selection.
        self.bind('<Left>', lambda event: self.display.choice_var.set(1))
        self.bind('<Right>', lambda event: self.display.choice_var.set(2))
        self.bind('<Down>', lambda event: self.display.choice_var.set(0))
        self.bind('a', lambda event: self.display.choice_var.set(1))
        self.bind('d', lambda event: self.display.choice_var.set(2))
        self.bind('s', lambda event: self.display.choice_var.set(0))
        self.bind('e', lambda event: self.save())
        self.pack()

    def next(self, event=None):
        """Record the verification choice and advance to the next row."""
        self.save_triplet()
        self.reload_display()

    def new_triplet(self, fps=120):
        """Load the next dataframe row's segments and frames into ``self``."""
        self.i += 1
        row = self.df.iloc[self.i]
        sample_names = ['anchor', 'sample1', 'sample2']
        try:
            segments = [decode_seg_string(row[sample]) for sample in sample_names]
        except ValueError as e:
            # NOTE(review): on a malformed row this leaves self.segments and
            # self.clips unchanged, so the previous triplet is shown again.
            print(e)
            return
        self.segments = [slice(seg[0], seg[1], int(math.ceil(self.video.fps / fps))) for seg in segments]
        if self.encoded is not None:
            # BUG FIX: this branch previously indexed with an undefined
            # ``random_idxs`` name (NameError).  Map each segment's start
            # frame onto the coarser encoding timeline instead.
            stride = int(len(self.video) / len(self.encoded))
            self.clip_encodeings = [self.encoded[seg.start // stride] for seg in self.segments]
        self.clips = [self.video[seg] for seg in self.segments]

    def reload_display(self):
        """Rebuild the clip display and, when encodings are available, show
        the embedding distances between the three clips."""
        self.display.destroy()
        self.display = ClipsDisplay(self, self.clips, fps=30)
        self.display.pack(side=tk.TOP)
        if self.encoded is not None:
            dist1 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[1])
            dist2 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[2])
            dist3 = np.linalg.norm(self.clip_encodeings[2] - self.clip_encodeings[1])
            print(dist1, dist2, dist3)
            self.dist_label1['text'] = f"distance between anchor and sample1: {dist1:.2f}"
            self.dist_label2['text'] = f"distance between anchor and sample2: {dist2:.2f}"
            self.dist_label3['text'] = f"distance between sample2 and sample1: {dist3:.2f}"
        self.load_clips()

    def save_triplet(self):
        """Buffer the current row plus the reviewer's verification choice."""
        self.saved_triplets.append({'video_file': self.video.video_file,
                                    'anchor': (self.segments[0].start, self.segments[0].stop),
                                    'sample1': (self.segments[1].start, self.segments[1].stop),
                                    'sample2': (self.segments[2].start, self.segments[2].stop),
                                    'selected': self.df.iloc[self.i]['selected'],
                                    'selected_verification': self.display.choice_var.get()})

    def load_clips(self):
        """Advance to the next dataframe row and load its clips."""
        print("start load clips")
        self.new_triplet()
        print("finish load clips")

    def save(self):
        """Append buffered verification rows to the output CSV (no-op when
        ``to_save`` is False)."""
        if not self.to_save:
            return
        print("saving dataframe...")
        df = pd.DataFrame.from_records(self.saved_triplets)
        path = 'triplets/data/selected_triplets_verificatio1.csv'
        mode = 'a' if os.path.exists(path) else 'w'
        df.to_csv(path_or_buf=path, mode=mode)
        self.saved_triplets = []

    def quit(self):
        """Flush pending rows and stop the Tk main loop."""
        print("quitting")
        self.save()
        self.root.quit()
# data_root = Path("/home/orel/Storage/Data/K6/2020-03-26/Down")
#data_root = Path("/mnt/storage2/shuki/data/THEMIS/0015")
# landmark_file = data_root/'2020-03-23'/'Down'/'0008DeepCut_resnet50_Down2May25shuffle1_1030000.h5'
# video_file = data_root/'2020-03-23'/'Down'/'0008.MP4'
def __main__():
    """Launch the triplet-labelling GUI on one hard-coded recording directory."""
    print(os.getcwd())
    data_root = Path("/mnt/Storage1/Data/K7/")
    root = tk.Tk()
    # NOTE(review): assumes at least six '2020-*/Down' directories exist
    # under data_root; index 5 picks an arbitrary recording.
    vid_dir = list(data_root.glob('2020-*/Down/'))[5]
    print(vid_dir)
    video = LandmarksVideo(vid_dir, include_landmarks=True)
    print(video.fps)
    app = App(root, video, save_file='data/robust_triplets1.csv')
    root.mainloop()
if __name__ == '__main__':
    __main__()
| [
"shukistern@gmail.com"
] | shukistern@gmail.com |
b039e6acf62b07e17f702ba4b667476922ffc9f5 | ea003fc657c1da911d3389bd627459f7460335f7 | /wagtail_env/lib/python3.5/site-packages/wagtail/core/middleware.py | 3c0181223bd3631ccbcbd22bd2e602db25bc6ad0 | [] | no_license | saikrishnasri/wagtail_project | db839c393ab48001947bd437bd2d7606cd6ed7c0 | f749813943871bd57951636259d451464be581be | refs/heads/main | 2023-02-26T23:59:23.063936 | 2021-02-03T12:14:29 | 2021-02-03T12:14:29 | 335,604,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import warnings
from django.utils.deprecation import MiddlewareMixin
from wagtail.core.models import Site
from wagtail.utils.deprecation import RemovedInWagtail211Warning
warnings.warn(
'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '
'Please update your code to use Site.find_for_request(request) in place of request.site, '
'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARE',
RemovedInWagtail211Warning
)
class SiteMiddleware(MiddlewareMixin):
    """Deprecated middleware that attaches the matched Site to each request."""

    def process_request(self, request):
        """Resolve the Site responsible for this request's hostname and store
        it on ``request.site`` (``None`` when no site matches)."""
        site = None
        try:
            site = Site.find_for_request(request)
        except Site.DoesNotExist:
            pass
        request.site = site
| [
"saikrishnakerla@gmail.com"
] | saikrishnakerla@gmail.com |
8f6d37f5508564a602db569ef8a1464aa1e84a87 | 53c983bbae20ec053ac6edfacbdf8f85b304911b | /tfomics/model_custom.py | eb243b858ee904170acc8ffac5de6836d65adb24 | [
"MIT"
] | permissive | p-koo/tfomics | 4efcad5c94c77a2522e004d9eb169aead893118a | 3db5e7ae7fd379b3c1a26f693504e28d4e3dceb0 | refs/heads/master | 2022-06-17T09:57:29.843982 | 2021-12-05T03:48:43 | 2021-12-05T03:48:43 | 269,372,383 | 4 | 7 | MIT | 2022-05-10T21:23:36 | 2020-06-04T13:50:15 | Python | UTF-8 | Python | false | false | 2,382 | py | from tensorflow import keras
import tensorflow as tf
import numpy as np
class CustomModel(keras.Model):
    """ Example of a custom model in keras """
    def train_step(self, data):
        """One optimization step: forward pass, loss, gradient update, metrics.

        Returns a dict mapping metric names to their current values (the
        compiled loss is included via ``self.metrics``).
        """
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        if len(data) == 3:
            x, y, sample_weight = data
        else:
            x, y = data
            sample_weight=None
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True) # Forward pass
            # Compute the loss value.
            # The loss function is configured in `compile()`.
            loss = self.compiled_loss(
                y,
                y_pred,
                sample_weight=sample_weight,
                regularization_losses=self.losses,
            )
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update the metrics.
        # Metrics are configured in `compile()`.
        self.compiled_metrics.update_state(y, y_pred, sample_weight=sample_weight)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}
    def test_step(self, data):
        """One evaluation step: forward pass (no gradients), loss, metrics."""
        # Unpack the data
        x, y = data
        # Compute predictions
        y_pred = self(x, training=False)
        # Updates the metrics tracking the loss
        self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Update the metrics.
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}
    #@property
    #def metrics(self):
        # We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch
        # or at the start of `evaluate()`.
        # If you don't implement this property, you have to call
        # `reset_states()` yourself at the time of your choosing.
        #return [loss_tracker, mae_metric]
| [
"koolaboratory.gmail.com"
] | koolaboratory.gmail.com |
90a81f052f088c36ebd6b1e883906c583d2493e2 | 96b55c342ca2269270d7f6623c050fac10651aa7 | /valid-palindrome.py | 3267eaad299a06784671bc60cecebf642d768614 | [] | no_license | nancy-cai/Data-Structure-and-Algorithm-in-Python | bbb5e54b9c8fc23669b8e4df9daeb1ee0480ff39 | 84258f5d848d1d9aed5a0ea98af0a06b70b716f4 | refs/heads/master | 2022-07-19T04:04:27.861406 | 2022-06-24T06:49:02 | 2022-06-24T06:49:02 | 199,798,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # https://leetcode.com/problems/valid-palindrome/
# def isPalindrome(s):
# trimmedS = ''.join(c for c in s if c.isalnum()).lower()
# reversedString = ""
# for i in range(len(trimmedS),0,-1):
# reversedString += trimmedS[i-1]
# print(reversedString)
# print (trimmedS)
# if trimmedS == reversedString:
# return True
# else:
# return False
def isPalindrome(s):
    """Return True when ``s`` is a palindrome, ignoring case and any
    non-alphanumeric characters (an empty filtered string counts as one)."""
    cleaned = "".join(ch.lower() for ch in s if ch.isalnum())
    return cleaned == cleaned[::-1]
print(isPalindrome("A man, a plan, a canal: Panama"))
# Fastest solution, O(n)
def isPalindromeTwoPointers(s):
    """Palindrome check in O(n) time and O(1) extra space.

    Two indices sweep inward, skipping non-alphanumeric characters and
    comparing the remaining characters case-insensitively.
    """
    left, right = 0, len(s) - 1
    while left <= right:
        if not s[left].isalnum():
            left += 1
            continue
        if not s[right].isalnum():
            right -= 1
            continue
        if s[left].lower() != s[right].lower():
            return False
        left += 1
        right -= 1
    return True
print(isPalindromeTwoPointers("A man, a plan,,^ a canal: Panama"))
| [
"noreply@github.com"
] | noreply@github.com |
6741190d20ae7a5afdcc23341c8f9c53faa6d1a6 | 8ca474cb495707701d3b76f6bd8c164d4bcd969e | /tests/js/test_expressions.py | a2e5d483b0df7747fcc83b26e6942dee514be598 | [] | permissive | Mozilla-GitHub-Standards/5efbfd0162bc80fa9a50bce7e607072e3a3149a22eeab6689e7c79585548f2a0 | e305a96e0ac8eeec950bbc2ec4850a82a10c1281 | beb65754cc2cfa757cc3330dbdd8ea9223eca268 | refs/heads/master | 2020-05-02T18:15:30.620444 | 2019-03-28T04:06:04 | 2019-03-28T04:06:04 | 178,124,027 | 0 | 0 | BSD-3-Clause | 2019-03-28T04:06:06 | 2019-03-28T04:06:03 | Python | UTF-8 | Python | false | false | 3,128 | py | from nose.tools import eq_
from js_helper import TestCase
class TestBinaryOperators(TestCase):
    """Test that all of the binary operators in JS work as expected."""
    def do_expr(self, expr, output):
        # Re-initialise the JS sandbox, evaluate ``expr`` into ``x``, and
        # assert the traversed value equals ``output``.
        self.setUp()
        self.run_script("var x = %s" % expr)
        self.assert_var_eq("x", output)
    def test_boolean_comp(self):
        # Booleans order as numbers: false == 0, true == 1.
        yield self.do_expr, "false < true", True
        yield self.do_expr, "true > false", True
        yield self.do_expr, "false > true", False
        yield self.do_expr, "true < false", False
        yield self.do_expr, "false < false", False
        yield self.do_expr, "true < true", False
        yield self.do_expr, "true == true", True
        yield self.do_expr, "false == false", True
        yield self.do_expr, "true > 0", True
        yield self.do_expr, "true == 1", True
        yield self.do_expr, "false < 1", True
        yield self.do_expr, "false == 0", True
    def test_string_comp(self):
        # Strings compare lexicographically, character by character.
        yield self.do_expr, '"string" < "string"', False
        yield self.do_expr, '"astring" < "string"', True
        yield self.do_expr, '"strings" < "stringy"', True
        yield self.do_expr, '"strings" < "stringier"', False
        yield self.do_expr, '"string" < "astring"', False
        yield self.do_expr, '"string" < "strings"', True
        # We can assume that the converses are true; Spidermonkey makes that
        # easy.
    def test_signed_zero_comp(self):
        # NOTE(review): these duplicate the first three boolean comparisons;
        # the method name suggests signed-zero cases were intended here.
        yield self.do_expr, "false < true", True
        yield self.do_expr, "true > false", True
        yield self.do_expr, "false > true", False
    def test_signed_zero(self):
        # Per IEEE 754, positive and negative zero compare equal.
        yield self.do_expr, "0 == 0", True
        yield self.do_expr, "0 != 0", False
        yield self.do_expr, "0 == -0", True
        yield self.do_expr, "0 != -0", False
        yield self.do_expr, "-0 == 0", True
        yield self.do_expr, "-0 != 0", False
    def test_typecasting(self):
        # Loose equality coerces strings to numbers before comparing.
        yield self.do_expr, "1 == '1'", True
        yield self.do_expr, "255 == '0xff'", True
        yield self.do_expr, "0 == '\\r'", True
    def test_additive_typecasting(self):
        # ``+`` with a string operand coerces the other operand to a string.
        self.run_script("""
        var first = true,
            second = "foo",
            third = 345;
        var a = first + second,
            b = second + first,
            c = Boolean(true) + String("foo"),
            d = String("foo") + Boolean(false),
            e = second + third,
            f = String("foo") + Number(-100);
        """)
        self.assert_var_eq("a", "truefoo")
        self.assert_var_eq("b", "footrue")
        self.assert_var_eq("c", "truefoo")
        self.assert_var_eq("d", "foofalse")
        self.assert_var_eq("e", "foo345")
        self.assert_var_eq("f", "foo-100")
    def test_addition_expressions(self):
        # ``+`` on two booleans or two numbers performs numeric addition.
        self.run_script("""
        var a = true + false,
            b = Boolean(true) + Boolean(false);
        var x = 100,
            y = -1;
        var c = x + y,
            d = Number(x) + Number(y);
        """)
        self.assert_var_eq("a", 1)
        self.assert_var_eq("b", 1)
        self.assert_var_eq("c", 99)
        self.assert_var_eq("d", 99)
| [
"me@mattbasta.com"
] | me@mattbasta.com |
d574e853ce03ff5093d2f5b1f586476ab7c17043 | 17c897fa76df9fb5d8106649327d68f54ed7ef40 | /Python/Flask/python_stack/flask_fundamentals/flask_mysql/server.py | c948051547e0f0bd9f20daeb024b21813ff2b4bf | [] | no_license | gitRobV/DojoAssignments | 5c23b75b2d8d57c92b4eccb04395091d1cf3127a | 472d13c0461847973abc57cac345047811066157 | refs/heads/master | 2021-01-21T20:11:56.534898 | 2017-06-19T04:48:54 | 2017-06-19T04:48:54 | 92,205,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import MySQLConnector
import re
from validation import Validation
app = Flask(__name__)
app.secret_key = 'This is some secure issh'
mysql = MySQLConnector(app, 'registration')
@app.route('/')
def index():
    """Render the landing page with every registered user."""
    users = mysql.query_db("SELECT * FROM users")
    return render_template('index.html', users = users)
@app.route('/users/<user_id>')
def user(user_id):
    """Show a single user looked up by primary key."""
    # Bound parameter keeps the lookup safe from SQL injection.
    query = 'SELECT * FROM users WHERE id = :specific_id'
    data = {
        'specific_id': user_id
    }
    user = mysql.query_db(query, data)
    # NOTE(review): user[0] raises IndexError when no row matches, and the
    # template path has a leading slash while index() renders 'index.html'
    # with a 'users' variable — confirm both against the template.
    return render_template('/index.html', user = user[0])
@app.route('/users', methods=['POST'])
def add_user():
    """Validate the registration form and insert a new user on success."""
    fields = [
        ('alpha', 'first_name', request.form['first_name']),
        ('alpha', 'last_name', request.form['last_name']),
        ('email', 'email', request.form['email']),
        ('pass_check', 'password', request.form['password'], request.form['confirm_password'], 8, 16)
    ]
    validation = Validation(fields)
    if validation.errors:
        # Stash the submitted values so the form can be re-populated, and
        # surface every validation message to the user.
        session['data'] = validation.data
        for message in validation.errors:
            flash(message)
        return redirect('/')
    insert = ('INSERT INTO users (first_name, last_name, email, password, created_at, updated_at) '
              'VALUES ( :first_name, :last_name, :email, :password, now(), now())')
    new_user_id = mysql.query_db(insert, validation.data)
    flash("You have successfully registered! You user ID is " + str(new_user_id) + ".")
    # Clear any previously stashed form data now that registration succeeded.
    session.pop('data', None)
    return redirect('/')
app.run(debug=True)  # Dev server only; debug=True must not be used in production.
| [
"robertv1979@gmail.com"
] | robertv1979@gmail.com |
a07c34f83feb8c45f87b09c7e1834de971aaafb7 | 65306b41168a5afa6fc80904cc0fbf737939a01a | /scale/recipe/seed/recipe_connection.py | 6c80d4edab2353b587342b6305fb77cb63e50ceb | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | kfconsultant/scale | 9e5df45cd36211d1bc5e946cf499a4584a2d71de | 28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b | refs/heads/master | 2020-12-07T00:04:37.737556 | 2020-01-06T12:57:03 | 2020-01-06T12:57:03 | 232,587,229 | 0 | 0 | Apache-2.0 | 2020-01-08T14:53:36 | 2020-01-08T14:53:35 | null | UTF-8 | Python | false | false | 10,510 | py | """Defines connections that will provide data to execute recipes"""
from __future__ import unicode_literals
from recipe.configuration.data.exceptions import InvalidRecipeConnection
from recipe.configuration.data.recipe_data import ValidationWarning
from storage.media_type import UNKNOWN_MEDIA_TYPE
class RecipeConnection(object):
    """Represents a connection that will provide data to execute recipes. This class contains the necessary description
    needed to ensure the data provided by the connection will be sufficient to execute the given recipe.
    """
    def __init__(self):
        """Constructor
        """
        # Every registered parameter name (both files and properties).
        self.param_names = set()
        self.properties = []
        self.files = {} # Param name -> (multiple, media types, optional)
        self.workspace = False
    def add_input_file(self, file_name, multiple, media_types, optional):
        """Adds a new file parameter to this connection
        :param file_name: The file parameter name
        :type file_name: str
        :param multiple: Whether the file parameter provides multiple files (True)
        :type multiple: bool
        :param media_types: The possible media types of the file parameter (unknown if None or [])
        :type media_types: list of str
        :param optional: Whether the file parameter is optional and may not be provided (True)
        :type optional: bool
        """
        if file_name in self.param_names:
            raise Exception('Connection already has a parameter named %s' % file_name)
        if not media_types:
            # No declared media types means the file's type is unknown.
            media_types = [UNKNOWN_MEDIA_TYPE]
        self.param_names.add(file_name)
        self.files[file_name] = (multiple, media_types, optional)
    def add_property(self, property_name):
        """Adds a new property parameter to this connection
        :param property_name: The property parameter name
        :type property_name: str
        """
        if property_name in self.param_names:
            raise Exception('Connection already has a parameter named %s' % property_name)
        self.param_names.add(property_name)
        self.properties.append(property_name)
    def add_workspace(self):
        """Indicates that this connection provides a workspace for storing output files
        """
        self.workspace = True
    def has_workspace(self):
        """Indicates whether this connection provides a workspace for storing output files
        :returns: True if this connection provides a workspace, False otherwise
        :rtype: bool
        """
        return self.workspace
    def validate_input_files(self, files):
        """Validates the given file parameters to make sure they are valid with respect to the recipe definition.
        :param files: Dict of file parameter names mapped to a tuple with three items: whether the parameter is required
            (True), if the parameter is for multiple files (True), and the description of the expected file meta-data
        :type files: dict of str ->
            tuple(bool, bool, :class:`job.configuration.interface.scale_file.ScaleFileDescription`)
        :returns: A list of warnings discovered during validation.
        :rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
        :raises :class:`recipe.configuration.data.exceptions.InvalidRecipeConnection`: If there is a configuration
            problem
        """
        warnings = []
        for name in files:
            required = files[name][0]
            multiple = files[name][1]
            file_desc = files[name][2]
            if name not in self.files:
                # Missing inputs are fatal only when required; optional ones
                # are simply skipped.
                if required:
                    raise InvalidRecipeConnection('Data input %s is required and was not provided' % name)
                continue
            conn_file = self.files[name]
            conn_multiple = conn_file[0]
            conn_media_types = conn_file[1]
            conn_optional = conn_file[2]
            if conn_optional and required:
                raise InvalidRecipeConnection('Data input %s is required and data from connection is optional' % name)
            if not multiple and conn_multiple:
                raise InvalidRecipeConnection('Data input %s only accepts a single file' % name)
            # Media-type mismatches only warn; they do not fail validation.
            for conn_media_type in conn_media_types:
                if not file_desc.is_media_type_allowed(conn_media_type):
                    warn = ValidationWarning('media_type',
                                             'Invalid media type for data input: %s -> %s' % (name, conn_media_type))
                    warnings.append(warn)
        return warnings
    def validate_input_json(self, property_names):
        """Validates the given property names to make sure all properties exist if they are required.
        :param property_names: Dict of property names mapped to a bool indicating if they are required
        :type property_names: dict of str -> bool
        :returns: A list of warnings discovered during validation.
        :rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
        :raises :class:`recipe.configuration.data.exceptions.InvalidRecipeConnection`: If there is a configuration
            problem
        """
        warnings = []
        for name in property_names:
            if name not in self.properties and property_names[name]:
                raise InvalidRecipeConnection('Property %s is required and was not provided' % name)
        return warnings
class SeedRecipeConnection(object):
    """Represents a connection that will provide data to execute jobs. This class contains the necessary description
    needed to ensure the data provided by the connection will be sufficient to execute the given job.
    """
    def __init__(self):
        """Constructor
        """
        # Every registered parameter name (both files and properties).
        self.param_names = set()
        self.properties = []
        self.files = {} # Param name -> (multiple, media types, optional, partial)
        self.workspace = False
    def add_input_file(self, file_name, multiple, media_types, optional, partial):
        """Adds a new file parameter to this connection
        :param file_name: The file parameter name
        :type file_name: str
        :param multiple: Whether the file parameter provides multiple files (True)
        :type multiple: bool
        :param media_types: The possible media types of the file parameter (unknown if None or [])
        :type media_types: list of str
        :param optional: Whether the file parameter is optional and may not be provided (True)
        :type optional: bool
        :param partial: Flag indicating if the parameter only requires a small portion of the file
        :type partial: bool
        """
        if file_name in self.param_names:
            raise Exception('Connection already has a parameter named %s' % file_name)
        if not media_types:
            # No declared media types means the file's type is unknown.
            media_types = [UNKNOWN_MEDIA_TYPE]
        self.param_names.add(file_name)
        self.files[file_name] = (multiple, media_types, optional, partial)
    def add_property(self, property_name):
        """Adds a new property parameter to this connection
        :param property_name: The property parameter name
        :type property_name: str
        """
        if property_name in self.param_names:
            raise Exception('Connection already has a parameter named %s' % property_name)
        self.param_names.add(property_name)
        self.properties.append(property_name)
    def add_workspace(self):
        """Indicates that this connection provides a workspace for storing output files
        """
        self.workspace = True
    def has_workspace(self):
        """Indicates whether this connection provides a workspace for storing output files
        :returns: True if this connection provides a workspace, False otherwise
        :rtype: bool
        """
        return self.workspace
    def validate_input_files(self, files):
        """Validates the given file parameters to make sure they are valid with respect to the job interface.
        :param files: List of file inputs
        :type files: [:class:`job.seed.types.SeedInputFiles`]
        :returns: A list of warnings discovered during validation.
        :rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
        :raises :class:`job.configuration.data.exceptions.InvalidConnection`: If there is a configuration problem.
        """
        warnings = []
        # NOTE(review): the loop variable `file` shadows the builtin of the
        # same name; harmless here but worth renaming.
        for file in files:
            if file.name not in self.files:
                # Missing inputs are fatal only when required.
                if file.required:
                    raise InvalidRecipeConnection('Data input %s is required and was not provided' % file.name)
                continue
            conn_file = self.files[file.name]
            conn_multiple = conn_file[0]
            conn_media_types = conn_file[1]
            conn_optional = conn_file[2]
            if conn_optional:
                if file.required:
                    raise InvalidRecipeConnection('Data input %s is required and data from connection is optional' %
                                                  file.name)
            if not file.multiple and conn_multiple:
                raise InvalidRecipeConnection('Data input %s only accepts a single file' % file.name)
            # Media-type mismatches only warn; they do not fail validation.
            for conn_media_type in conn_media_types:
                if not file.is_media_type_allowed(conn_media_type):
                    warn = ValidationWarning('media_type',
                                             'Invalid media type for data input: %s -> %s' %
                                             (file.name, conn_media_type))
                    warnings.append(warn)
        return warnings
    def validate_properties(self, property_names):
        """Validates the given property names to make sure all properties exist if they are required.
        :param property_names: Dict of property names mapped to a bool indicating if they are required
        :type property_names: dict of str -> bool
        :returns: A list of warnings discovered during validation.
        :rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
        :raises :class:`job.configuration.data.exceptions.InvalidConnection`: If there is a configuration problem.
        """
        warnings = []
        for name in property_names:
            if name not in self.properties and property_names[name]:
                raise InvalidRecipeConnection('Property %s is required and was not provided' % name)
        return warnings
| [
"jon@gisjedi.com"
] | jon@gisjedi.com |
1816bfc1c36708cf3b437c0c359eb6f79ae3e8ae | 4b27375eac64d804255cbaadecb109361472f902 | /nets.py | 2a3a7b4c84352592699bd8f4e478c0eaa479c2df | [] | no_license | QhelDIV/Two-Step-Disentanglement-Method | 04b3e0c645c870ebef9068388a6544d8c0ef89f1 | 5f825ece867d45bb93612df59aa84b139456e1eb | refs/heads/master | 2020-03-14T21:38:37.710811 | 2018-07-24T02:58:39 | 2018-07-24T02:58:39 | 131,801,402 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,593 | py | # some setup code
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from config import *
import layers
from params import *
def Encoder(img_size, in_channel, conv_channel, filter_size, latent_dim, dense_size, bn):
    """Build a convolutional encoder mapping an image batch to a latent vector.

    Architecture: two stride-2 conv layers (each halving the spatial size),
    one stride-1 conv layer, flatten, then two dense layers down to
    ``latent_dim``.

    :param img_size: input image height/width (should be divisible by 4)
    :param in_channel: number of channels in the input image
    :param conv_channel: channels of the first conv layer (later layers use half)
    :param filter_size: square conv kernel size
    :param latent_dim: size of the produced latent vector
    :param dense_size: width of the intermediate dense layer
    :param bn: whether the conv layers use batch normalization
    :returns: the model wrapped in ``torch.nn.DataParallel`` on the configured device
    """
    inner_conv_channel = conv_channel//2
    if img_size%4 != 0:
        print("WARNING: image size mod 4 != 0, may produce bug.")
    # Flattened feature count entering the first dense layer; the two stride-2
    # convs shrink each spatial dimension to img_size//4.
    # BUGFIX: use integer division (as Decoder already does) so layers.Dense
    # receives an int instead of a float.
    flatten_img_size = inner_conv_channel * (img_size//4) * (img_size//4)
    # explain: first two layer's padding = 2, because we set W/S = W/S + floor((-F+2P)/S+1), S=2,F=5,so P=2
    if VERBOSE:
        print(img_size, in_channel, conv_channel, filter_size, latent_dim, bn)
    model = nn.Sequential(
        layers.ConvLayer(in_channel, conv_channel, filter_size, stride=2, padding = 2, bn=bn),
        layers.ConvLayer(conv_channel, inner_conv_channel, filter_size, stride=2, padding = 2, bn=bn),
        layers.ConvLayer(inner_conv_channel,inner_conv_channel, filter_size, stride=1, padding = 2, bn=bn),
        layers.Flatten(),
        layers.Dense(flatten_img_size, dense_size),
        layers.Dense(dense_size, latent_dim)
    )
    model = model.to(device=device, dtype=dtype)
    model = torch.nn.DataParallel(model, device_ids = GPU_IDs)
    return model
def Classifier(input_dim, dense_size, s_classes, bn):
    """Three-layer MLP mapping a latent vector to per-class scores."""
    stack = [
        layers.Dense(input_dim, dense_size, bn=bn),
        layers.Dense(dense_size, dense_size, bn=bn),
        layers.Dense(dense_size, s_classes, bn=bn),
    ]
    model = nn.Sequential(*stack).to(device=device, dtype=dtype)
    return torch.nn.DataParallel(model, device_ids=GPU_IDs)
def Decoder(s_dim, z_dim, img_size, img_channel, conv_channel, filter_size, dense_size, bn):
    """Mirror of Encoder: expand an (s, z) latent pair back into an image batch.

    Two dense layers reshape into a small feature map, then two upsampling
    conv layers restore the original spatial size.
    """
    half_channel = conv_channel//2
    small_size = img_size//4
    latent_size = s_dim + z_dim
    # Padding chosen so that floor((-F+2P)/1 + 1) == 0, i.e. convs preserve size.
    same_pad = int(np.floor(filter_size/2))
    model = nn.Sequential(
        layers.Dense(latent_size, dense_size),
        layers.Dense(dense_size, half_channel*small_size*small_size),
        layers.Reshape((-1, half_channel, small_size, small_size)),
        layers.ConvLayer(half_channel, half_channel, filter_size, stride=1, padding=same_pad, bn=bn, upsampling=True),
        layers.ConvLayer(half_channel, conv_channel, filter_size, stride=1, padding=same_pad, bn=bn, upsampling=True),
        layers.ConvLayer(conv_channel, img_channel, filter_size, stride=1, padding=same_pad, bn=bn, upsampling=False),
    )
    model = model.to(device=device, dtype=dtype)
    return torch.nn.DataParallel(model, device_ids=GPU_IDs)
def AdvLayer(input_dim, dense_size, s_classes, bn):
    """Adversarial head; intentionally identical in structure to Classifier."""
    adv = Classifier(input_dim, dense_size, s_classes, bn)
    return adv
def S_Encoder(params):
    """Construct the specified-factor (label) encoder from hyper-parameters."""
    model = Encoder(
        params.img_size,
        params.img_channel,
        params.enc_conv_channel,
        params.enc_conv_filter_size,
        params.s_enc_dim,
        params.encdec_dense_size,
        params.s_enc_bn,
    )
    model.m_name = 's_enc'
    return model
def Z_Encoder(params):
    """Construct the unspecified-factor encoder from hyper-parameters."""
    model = Encoder(
        params.img_size,
        params.img_channel,
        params.enc_conv_channel,
        params.enc_conv_filter_size,
        params.z_enc_dim,
        params.encdec_dense_size,
        params.z_enc_bn,
    )
    model.m_name = 'z_enc'
    return model
def S_Classifier(params):
    """Classifier head over the s-latent predicting the specified label."""
    model = Classifier(
        params.s_enc_dim,
        params.classifier_dense_size,
        params.classes_num,
        params.classifier_use_bn,
    )
    model.m_name = 's_classifier'
    return model
def Z_AdvLayer(params):
    """Adversarial classifier over the z-latent (same shape as S_Classifier)."""
    # NOTE(review): unlike the other factories this one sets no ``m_name`` --
    # confirm whether that is intentional.
    return AdvLayer(
        params.z_enc_dim,
        params.classifier_dense_size,
        params.classes_num,
        params.classifier_use_bn,
    )
def SZ_Decoder(params):
    """Decoder that reconstructs an image from the concatenated (s, z) latents."""
    model = Decoder(
        params.s_enc_dim,
        params.z_enc_dim,
        params.img_size,
        params.img_channel,
        params.dec_conv_channel,
        params.dec_conv_filter_size,
        params.encdec_dense_size,
        params.dec_use_bn,
    )
    model.m_name = 'sz_dec'
    return model
def test_Encoder(model, params):
    """Smoke test: push a zero image batch through the encoder and print the
    output size (expected [64, latent_dim])."""
    batch = torch.zeros((64, params.img_channel, params.img_size, params.img_size), dtype=dtype)
    out = model(batch)
    print(out.size())  # you should see [64, latent_dim]
    print()
def test_classifier(model, params):
    """Smoke test: push a zero latent batch through the classifier and print
    the output size (expected [64, classes_num])."""
    latent = torch.zeros((64, params.s_enc_dim), dtype=dtype)
    out = model(latent)
    print(out.size())  # should see [64,classes_num]
    print()
def test_Decoder(model,params):
    """Smoke test: push a zero (s+z) latent batch through the decoder and print the output size."""
    x = torch.zeros((64, params.s_enc_dim + params.z_enc_dim), dtype=dtype)
    scores = model(x)
    print(scores.size()) # should see [64,classes_num]
print() | [
"qheldiv@gmail.com"
] | qheldiv@gmail.com |
1d5b8ef75898cbe899d0481aaac82b653ed75e32 | 962db47068ceac61a6a8f207ddfe489d5d68880b | /python/services/compute/subnetwork.py | 75725ab83725096d3e8c9579f65470cfc84071ab | [
"Apache-2.0"
] | permissive | isabella232/declarative-resource-client-library | 8016eeff943188a658c1df849303e30aa78f3dd0 | 2149dbf673cfa2cdbeca242b7e4d2bb0e7ea050d | refs/heads/main | 2023-03-06T17:49:13.775734 | 2021-02-09T23:41:07 | 2021-02-09T23:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,140 | py | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import subnetwork_pb2
from google3.cloud.graphite.mmv2.services.google.compute import subnetwork_pb2_grpc
from typing import List
class Subnetwork(object):
    """Declarative client for a GCP Compute subnetwork.

    Holds the desired configuration and marshals it to/from the
    ``subnetwork_pb2`` protos used by the gRPC service.  The previously
    duplicated marshal/unmarshal code in apply()/hcl() and
    apply()/from_any() is factored into private helpers.
    """
    def __init__(
        self,
        creation_timestamp: str = None,
        description: str = None,
        gateway_address: str = None,
        ip_cidr_range: str = None,
        name: str = None,
        network: str = None,
        fingerprint: str = None,
        purpose: str = None,
        role: str = None,
        secondary_ip_range: list = None,
        private_ip_google_access: bool = None,
        region: str = None,
        log_config: dict = None,
        project: str = None,
        self_link: str = None,
        enable_flow_logs: bool = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        # Output-only fields (creation_timestamp, gateway_address,
        # fingerprint, self_link) are accepted by the signature but not
        # stored; they are populated from service responses.
        self.description = description
        self.ip_cidr_range = ip_cidr_range
        self.name = name
        self.network = network
        self.purpose = purpose
        self.role = role
        self.secondary_ip_range = secondary_ip_range
        self.private_ip_google_access = private_ip_google_access
        self.region = region
        self.log_config = log_config
        self.project = project
        self.enable_flow_logs = enable_flow_logs
        self.service_account_file = service_account_file

    def _populate_resource(self, resource):
        """Copy every set field of this object onto a request's resource proto.

        Shared by apply() and hcl(); unset (falsy) fields are left at their
        proto defaults.
        """
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.ip_cidr_range):
            resource.ip_cidr_range = Primitive.to_proto(self.ip_cidr_range)
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.network):
            resource.network = Primitive.to_proto(self.network)
        if SubnetworkPurposeEnum.to_proto(self.purpose):
            resource.purpose = SubnetworkPurposeEnum.to_proto(self.purpose)
        if SubnetworkRoleEnum.to_proto(self.role):
            resource.role = SubnetworkRoleEnum.to_proto(self.role)
        if SubnetworkSecondaryIPRangeArray.to_proto(self.secondary_ip_range):
            resource.secondary_ip_range.extend(
                SubnetworkSecondaryIPRangeArray.to_proto(self.secondary_ip_range)
            )
        if Primitive.to_proto(self.private_ip_google_access):
            resource.private_ip_google_access = Primitive.to_proto(
                self.private_ip_google_access
            )
        if Primitive.to_proto(self.region):
            resource.region = Primitive.to_proto(self.region)
        if SubnetworkLogConfig.to_proto(self.log_config):
            resource.log_config.CopyFrom(SubnetworkLogConfig.to_proto(self.log_config))
        else:
            resource.ClearField("log_config")
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.enable_flow_logs):
            resource.enable_flow_logs = Primitive.to_proto(self.enable_flow_logs)

    def _absorb_proto(self, proto):
        """Refresh this object's fields from a ComputeSubnetwork-shaped proto.

        Shared by apply() (service response) and from_any() (unpacked Any).
        """
        self.creation_timestamp = Primitive.from_proto(proto.creation_timestamp)
        self.description = Primitive.from_proto(proto.description)
        self.gateway_address = Primitive.from_proto(proto.gateway_address)
        self.ip_cidr_range = Primitive.from_proto(proto.ip_cidr_range)
        self.name = Primitive.from_proto(proto.name)
        self.network = Primitive.from_proto(proto.network)
        self.fingerprint = Primitive.from_proto(proto.fingerprint)
        self.purpose = SubnetworkPurposeEnum.from_proto(proto.purpose)
        self.role = SubnetworkRoleEnum.from_proto(proto.role)
        self.secondary_ip_range = SubnetworkSecondaryIPRangeArray.from_proto(
            proto.secondary_ip_range
        )
        self.private_ip_google_access = Primitive.from_proto(
            proto.private_ip_google_access
        )
        self.region = Primitive.from_proto(proto.region)
        self.log_config = SubnetworkLogConfig.from_proto(proto.log_config)
        self.project = Primitive.from_proto(proto.project)
        self.self_link = Primitive.from_proto(proto.self_link)
        self.enable_flow_logs = Primitive.from_proto(proto.enable_flow_logs)

    def apply(self):
        """Create or update the subnetwork, then refresh this object from the response."""
        stub = subnetwork_pb2_grpc.ComputeSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.ApplyComputeSubnetworkRequest()
        self._populate_resource(request.resource)
        request.service_account_file = self.service_account_file
        response = stub.ApplyComputeSubnetwork(request)
        self._absorb_proto(response)

    def hcl(self):
        """Return the HCL (Terraform) rendering of this resource."""
        stub = subnetwork_pb2_grpc.ComputeSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.ComputeSubnetworkAsHclRequest()
        self._populate_resource(request.resource)
        response = stub.ComputeSubnetworkAsHcl(request)
        return response.hcl

    @classmethod
    def delete(self, project, region, name, service_account_file=""):
        """Delete the named subnetwork in the given project/region."""
        stub = subnetwork_pb2_grpc.ComputeSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.DeleteComputeSubnetworkRequest()
        request.service_account_file = service_account_file
        request.Project = project
        request.Region = region
        request.Name = name
        response = stub.DeleteComputeSubnetwork(request)

    @classmethod
    def list(self, project, region, service_account_file=""):
        """Return all subnetworks in the given project/region."""
        stub = subnetwork_pb2_grpc.ComputeSubnetworkServiceStub(channel.Channel())
        request = subnetwork_pb2.ListComputeSubnetworkRequest()
        request.service_account_file = service_account_file
        request.Project = project
        request.Region = region
        return stub.ListComputeSubnetwork(request).items

    @classmethod
    def from_any(self, any_proto):
        """Unpack a protobuf ``Any`` into a new Subnetwork instance."""
        # Marshal any proto to regular proto.
        res_proto = subnetwork_pb2.ComputeSubnetwork()
        any_proto.Unpack(res_proto)
        res = Subnetwork()
        res._absorb_proto(res_proto)
        return res
class SubnetworkSecondaryIPRange(object):
    """Value object for one secondary IP range of a subnetwork."""
    def __init__(self, range_name: str = None, ip_cidr_range: str = None):
        self.range_name = range_name
        self.ip_cidr_range = ip_cidr_range

    @classmethod
    def to_proto(self, resource):
        """Marshal *resource* into the generated proto; falsy passes through as None."""
        if not resource:
            return None
        proto = subnetwork_pb2.ComputeSubnetworkSecondaryIPRange()
        name_value = Primitive.to_proto(resource.range_name)
        if name_value:
            proto.range_name = name_value
        cidr_value = Primitive.to_proto(resource.ip_cidr_range)
        if cidr_value:
            proto.ip_cidr_range = cidr_value
        return proto

    @classmethod
    def from_proto(self, resource):
        """Build an instance from a proto; falsy passes through as None."""
        if not resource:
            return None
        return SubnetworkSecondaryIPRange(
            range_name=resource.range_name,
            ip_cidr_range=resource.ip_cidr_range,
        )
class SubnetworkSecondaryIPRangeArray(object):
    """List marshaling helpers for SubnetworkSecondaryIPRange."""
    @classmethod
    def to_proto(self, resources):
        # Falsy inputs (None, []) pass through unchanged.
        if not resources:
            return resources
        return [SubnetworkSecondaryIPRange.to_proto(item) for item in resources]

    @classmethod
    def from_proto(self, resources):
        return [SubnetworkSecondaryIPRange.from_proto(item) for item in resources]
class SubnetworkLogConfig(object):
    """Flow-log settings for a subnetwork."""
    def __init__(
        self,
        aggregation_interval: str = None,
        flow_sampling: float = None,
        metadata: str = None,
    ):
        self.aggregation_interval = aggregation_interval
        self.flow_sampling = flow_sampling
        self.metadata = metadata

    @classmethod
    def to_proto(self, resource):
        """Marshal *resource* into the generated proto; falsy passes through as None."""
        if not resource:
            return None
        proto = subnetwork_pb2.ComputeSubnetworkLogConfig()
        interval = SubnetworkLogConfigAggregationIntervalEnum.to_proto(
            resource.aggregation_interval
        )
        if interval:
            proto.aggregation_interval = interval
        sampling = Primitive.to_proto(resource.flow_sampling)
        if sampling:
            proto.flow_sampling = sampling
        metadata = SubnetworkLogConfigMetadataEnum.to_proto(resource.metadata)
        if metadata:
            proto.metadata = metadata
        return proto

    @classmethod
    def from_proto(self, resource):
        """Build an instance from a proto; falsy passes through as None."""
        if not resource:
            return None
        return SubnetworkLogConfig(
            aggregation_interval=resource.aggregation_interval,
            flow_sampling=resource.flow_sampling,
            metadata=resource.metadata,
        )
class SubnetworkLogConfigArray(object):
    """List marshaling helpers for SubnetworkLogConfig."""
    @classmethod
    def to_proto(self, resources):
        # Falsy inputs (None, []) pass through unchanged.
        if not resources:
            return resources
        return [SubnetworkLogConfig.to_proto(item) for item in resources]

    @classmethod
    def from_proto(self, resources):
        return [SubnetworkLogConfig.from_proto(item) for item in resources]
class SubnetworkPurposeEnum(object):
    """Converts the purpose enum between short strings and proto values."""
    @classmethod
    def to_proto(self, resource):
        if not resource:
            return resource
        full_name = f"ComputeSubnetworkPurposeEnum{resource}"
        return subnetwork_pb2.ComputeSubnetworkPurposeEnum.Value(full_name)

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeSubnetworkPurposeEnum.Name(resource)
        # Strip the generated prefix to recover the short enum string.
        return full_name[len("ComputeSubnetworkPurposeEnum"):]
class SubnetworkRoleEnum(object):
    """Converts the role enum between short strings and proto values."""
    @classmethod
    def to_proto(self, resource):
        if not resource:
            return resource
        full_name = f"ComputeSubnetworkRoleEnum{resource}"
        return subnetwork_pb2.ComputeSubnetworkRoleEnum.Value(full_name)

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeSubnetworkRoleEnum.Name(resource)
        # Strip the generated prefix to recover the short enum string.
        return full_name[len("ComputeSubnetworkRoleEnum"):]
class SubnetworkLogConfigAggregationIntervalEnum(object):
    """Converts the aggregation-interval enum between short strings and proto values."""
    @classmethod
    def to_proto(self, resource):
        if not resource:
            return resource
        full_name = f"ComputeSubnetworkLogConfigAggregationIntervalEnum{resource}"
        return subnetwork_pb2.ComputeSubnetworkLogConfigAggregationIntervalEnum.Value(full_name)

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeSubnetworkLogConfigAggregationIntervalEnum.Name(resource)
        # Strip the generated prefix to recover the short enum string.
        return full_name[len("ComputeSubnetworkLogConfigAggregationIntervalEnum"):]
class SubnetworkLogConfigMetadataEnum(object):
    """Converts the log-metadata enum between short strings and proto values."""
    @classmethod
    def to_proto(self, resource):
        if not resource:
            return resource
        full_name = f"ComputeSubnetworkLogConfigMetadataEnum{resource}"
        return subnetwork_pb2.ComputeSubnetworkLogConfigMetadataEnum.Value(full_name)

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        full_name = subnetwork_pb2.ComputeSubnetworkLogConfigMetadataEnum.Name(resource)
        # Strip the generated prefix to recover the short enum string.
        return full_name[len("ComputeSubnetworkLogConfigMetadataEnum"):]
class Primitive(object):
    """Marshals plain scalar values; falsy values go out as the empty string."""
    @classmethod
    def to_proto(self, s):
        # None/""/0/False all collapse to "" on the wire.
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        # Incoming values are used verbatim.
        return s
| [
"hawk@google.com"
] | hawk@google.com |
150d0da8ecad01521adf1ca43020d90ad690807e | 219dbbd4c88eb3c323f2644fcd9c804975c58b78 | /dlux/dashboards/network/neutron_subnets/views.py | 36fe5652b0e9edb8880900550ae611865d032127 | [
"Apache-2.0"
] | permissive | mavenugo/dlux-horizon | a0e6dfc622cfc653acbd847952bc84e11d7209df | 07f06a75e9909fd103d3b35d7e666802979afb79 | refs/heads/master | 2021-04-26T14:04:58.273087 | 2014-02-02T16:53:42 | 2014-02-02T16:53:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Dave Tucker <dave.j.tucker@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon import tables as horizon_tables
from horizon import tabs as tabs
from dlux.api import get_client
from dlux.dashboards.network.neutron_subnets import tables
import dlux.dashboards.network.neutron_subnets.tabs as subnet_tabs
class IndexView(horizon_tables.DataTableView):
    """Table view listing every Neutron subnet known to the controller."""
    template_name = "network/neutron_subnets/index.html"
    table_class = tables.NeutronSubnetsTable

    def get_data(self):
        """Fetch the subnet rows for the table from the DLUX API client."""
        return get_client(self.request).neutron.subnets.list()
class DetailView(tabs.TabView):
    """Tabbed detail page for a single Neutron subnet."""
    tab_group_class = subnet_tabs.SubnetDetailTabs
    template_name = 'network/neutron_subnets/detail.html'
| [
"dave@dtucker.co.uk"
] | dave@dtucker.co.uk |
77858201d3e474368bf11eb53a147fa09d12a3b1 | cc4269b6b2c7ca8a9869a36452237d22ae251754 | /backend/app/api/routes/root.py | a8dafb1437439f1968fd823ab8f07f72f777cfad | [] | no_license | AKCEJIEPATOP/peak_flow_meter | f6ef7ac559d6941d2e8b375ca3e2288ac2b18769 | ad4aedd61ddcc5027d052437ed3a7ec28670f9d7 | refs/heads/master | 2023-01-19T14:54:12.063563 | 2020-12-02T23:48:02 | 2020-12-02T23:48:02 | 297,611,999 | 0 | 0 | null | 2020-12-01T06:09:33 | 2020-09-22T10:17:40 | JavaScript | UTF-8 | Python | false | false | 210 | py | from fastapi import APIRouter
from . import user
router = APIRouter()
# Mount the user sub-router; all of its routes are served under /user.
router.include_router(user.router, prefix='/user', tags=['User'])
@router.get('/ping')
async def ping():
    """Liveness probe: always answers 'pong'."""
    return 'pong'
| [
"zhek26rus@google.com"
] | zhek26rus@google.com |
5b1b14b644ca7243cb161acdafa816e3aa462487 | e4f828eb6cfb775d9e8cb792902680f45a94470e | /10 Days of Statistics/Day-7/Python/02_Spearman's Rank Correlation Coefficient.py | ac2c0fd99f3dc0324363e825205b1c9ab1d6406b | [] | no_license | j471n/Hacker-Rank | 831a90412e86be1314a401143290fd6e843848be | 0a56107d413c5f661db59a3d1c3f700460ccb618 | refs/heads/master | 2022-12-29T22:33:51.759452 | 2020-10-05T08:04:51 | 2020-10-05T08:04:51 | 285,738,337 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | class SPCC:
def spearmanscoeff(x, y, n):
# calculating the range of x and y.
rx = [sorted(x).index(_) + 1 for _ in x]
ry = [sorted(y).index(_) + 1 for _ in y]
# finding di^2 and coefficient.
di = [(x - y) ** 2 for (x, y) in zip(rx, ry)]
coeff = 1 - (6 * sum(di)) / (n ** 3 - n)
return coeff
if __name__ == '__main__':
n = int(input())
x, y = [list(map(float, input().split())) for _ in range(2)]
print(round(SPCC.spearmanscoeff(x, y, n), 3))
| [
"noreply@github.com"
] | noreply@github.com |
92aaa9f2c0851bde5ed7572fb8b8c62845c4c814 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /R4D59C9CQbJvqWaKd_6.py | ed52bb6e52badf15ab27956a07eb2844ef6a368d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | """
A baseball player's batting average is calculated by the following formula:
BA = (number of hits) / (number of official at-bats)
Batting averages are always expressed rounded to the nearest thousandth with
no leading zero. The top 3 MLB batting averages of all-time are:
1. Ty Cobb .366
2. Rogers Hornsby .358
3. Shoeless Joe Jackson .356
The given list represents a season of games. Each list item indicates a
player's `[hits, official at bats]` per game. Return a string with the
player's seasonal batting average rounded to the nearest thousandth.
### Examples
batting_avg([[0, 0], [1, 3], [2, 2], [0, 4], [1, 5]]) ➞ ".286"
batting_avg([[2, 5], [2, 3], [0, 3], [1, 5], [2, 4]]) ➞ ".350"
batting_avg([[2, 3], [1, 5], [2, 4], [1, 5], [0, 5]]) ➞ ".273"
### Notes
* The number of hits will not exceed the number of official at-bats.
* The list includes official at-bats only. No other plate-appearances (walks, hit-by-pitches, sacrifices, etc.) are included in the list.
* HINT: Think in terms of total hits and total at-bats.
"""
def batting_avg(lst):
    """Return the seasonal batting average as a string rounded to 3 places.

    lst is a list of [hits, at_bats] pairs, one per game.  The result is
    total hits over total at-bats, with no leading zero (".286"-style) and
    always exactly three decimals.

    Bug fix: the old code did str(round(avg, 3))[1:] and padded with zeros,
    so a perfect 1.000 season came back as ".000" ("1.0"[1:] == ".0").
    """
    hits = sum(game[0] for game in lst)
    at_bats = sum(game[1] for game in lst)
    # The format spec guarantees three decimals, so no manual zero-padding
    # is needed; drop the leading "0" per batting-average convention.
    avg = f"{hits / at_bats:.3f}"
    return avg[1:] if avg.startswith("0") else avg
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
efe09aef39889d38f44489e3f2c1b0308a60216d | 39c7c59020e823b3cff15537a8e336d4bcd8d801 | /getSolidotContentByDate.py | d45905d1b5d92bf11b9e965540d62e9026cbb830 | [] | no_license | glodsky/curlSolidotNewByDate | 7638a24797a96600432fc5fd683bf664ef031255 | 423c7cb1a7fdfe186bd6c574c0e7b7af945b6a64 | refs/heads/master | 2020-04-08T18:53:37.052279 | 2018-12-04T04:35:00 | 2018-12-04T04:35:00 | 159,629,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,493 | py | # -*- coding: utf-8 -*-
# python version : 3.6
import urllib.request
import pandas as pd
import os
import json
import datetime
import random
from lxml import etree
Proxies_POOLs = []  # module-level proxy pool: list of {protocol: "host:port"} dicts


def init_proxiesPOOLs():
    """Load the proxy pool from ./prxies_pools.csv into Proxies_POOLs.

    Each CSV row is "host,port,protocol"; every row becomes one
    {protocol: "host:port"} dict appended to the global pool.
    """
    global Proxies_POOLs
    with open('./prxies_pools.csv', 'r') as f:
        # Iterate the file directly instead of readlines() + an index loop;
        # the redundant f.close() inside the with-block is removed (the
        # context manager already closes the file).
        for line in f:
            details = line.split(',')
            Proxies_POOLs.append(
                {details[2].strip('\n'): "%s:%s" % (details[0], details[1])})
def get_OneProxy(pool=None):
    """Return one randomly chosen proxy dict.

    pool defaults to the module-level Proxies_POOLs, so existing
    zero-argument callers are unaffected; passing an explicit list makes
    the helper reusable and testable.  random.choice replaces the manual
    randint(0, len-1) indexing (identical semantics, clearer intent).
    """
    if pool is None:
        pool = Proxies_POOLs
    return random.choice(pool)
def use_proxy(url):
    """Download *url* and return the body decoded as UTF-8 (errors ignored).

    Side effect: installs an opener into urllib's process-global state.
    NOTE(review): proxy_addr is None, so ProxyHandler falls back to the
    environment proxy settings; the random-proxy call is commented out.
    """
    req=urllib.request.Request(url)
    proxy_addr = None  # get_OneProxy() -- random proxy rotation currently disabled
    # Browser-like User-Agent so the site serves the normal HTML page.
    req.add_header("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
    proxy=urllib.request.ProxyHandler(proxy_addr)
    opener=urllib.request.build_opener(proxy,urllib.request.HTTPHandler)
    urllib.request.install_opener(opener)
    data=urllib.request.urlopen(req).read().decode('utf-8','ignore')
    return data
def get_OneDayInformation(url, peroid):
    """Scrape one day's Solidot issue page and dump the articles to JSON.

    url    -- issue page, e.g. https://www.solidot.org/?issue=20181201
    peroid -- YYYYMMDD string used in the output file name
    Returns 0 when no articles were found, otherwise None.
    """
    content = use_proxy(url)
    day_news = []
    try:
        html = etree.HTML(content)
        titles = html.xpath('//div[@class="bg_htit"]/h2/a/text()')
        talk_times = html.xpath('//div[@id="center"]/div[@class="block_m"]/div[@class="talk_time"]/text()')
        div_mainnews = html.xpath('//div[@id="center"]/div[@class="block_m"]/div[@class="p_content"]/div[@class="p_mainnew"]')
        # news[0]=titles, news[1]=timestamps, news[2]=bodies, news[3]=keyword links
        news = [[] for i in range(4)]
        for title in titles:
            news[0].append(title)
        for talk_time in talk_times:
            # Drop whitespace-only entries produced by layout text nodes.
            tt = talk_time.strip()
            if tt != '\r\n' and tt != '':
                news[1].append(tt)
        for mainnew in div_mainnews:
            news[2].append(mainnew.xpath('string(.)'))
            # Collect each inline keyword together with its hyperlink.
            keywords = mainnew.xpath('.//a/text()')
            keywords_link = mainnew.xpath('.//a/@href')
            key_links = [{'keyword': kw, 'link': lk}
                         for kw, lk in zip(keywords, keywords_link)]
            news[3].append(key_links)
        nums = len(news[0])
        if nums > 0:
            print("Found %5d news" % nums)
        else:
            print("Not Found")
            return 0
        for i in range(nums):
            day_news.append({'title': news[0][i], 'talk_time': news[1][i],
                             'mainnews': news[2][i], 'keywords_links': news[3][i]})
            print("%s\n%s\n%s" % (news[0][i], news[1][i], news[2][i]))
            print("\n")
        fname = './SolidotNews_%s.json' % peroid
        with open(fname, 'w', encoding='utf-8') as f:
            # Bug fix: the file used to receive str(day_news) -- a Python
            # repr with single quotes that is not valid JSON despite the
            # .json extension.  json.dump writes real JSON; ensure_ascii
            # keeps the Chinese text readable.
            json.dump(day_news, f, ensure_ascii=False)
    except etree.ParserError as e:
        print("At url=%s \nError type = %s" % (url, e))
def get_NewsFromDateRange():
    """Scrape the Solidot issue page for every day in a fixed date range."""
    peroid_range = pd.period_range('11/01/2018', '12/01/2018', freq='D')
    for day in peroid_range:
        issue = str(day).replace('-', '')
        url = "https://www.solidot.org/?issue=%s" % issue
        print(url)
        # Bug fix: get_OneDayInformation() requires the period string as its
        # second argument; the old call passed only the url and raised
        # TypeError on every iteration.
        get_OneDayInformation(url, issue)
def main():
    """Entry point: load the proxy pool, then scrape the most recent days."""
    init_proxiesPOOLs()
    ResentDaysNews = 3 # number of most-recent Solidot days to fetch; 1 means today only
    ResentDaysNews_list = []
    for i in range(ResentDaysNews):
        # Today, yesterday, ... formatted as YYYYMMDD issue keys.
        ResentDaysNews_list.append( (datetime.datetime.now()+datetime.timedelta(days= -(i) )).strftime("%Y%m%d"))
    print(ResentDaysNews_list)
    for day in ResentDaysNews_list:
        url = "https://www.solidot.org/?issue=%s"%(day)
        get_OneDayInformation(url,day)
    return
    #get_NewsFromDateRange()  (alternative: scrape a fixed historical range)
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
ad9abe7412feb409a43dcc7ec48daed884bd01a3 | a9215e088517b6ad3e3a02de6c92ff082f0c8042 | /turtle_words/svg_turtle.py | 62354c5fb79c1d046fde239c6b774bbc5f6a0134 | [] | no_license | jacobhokanson/gc-sites | af563733a14db204d1e7a2f542ed803f64608858 | eab6bd923a0aedab7d8a10a2a109e781fdcb1b24 | refs/heads/master | 2022-07-04T03:20:42.171757 | 2020-05-09T18:31:35 | 2020-05-09T18:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,817 | py | class svg_image():
"""docstring for SVG image idea"""
def __init__(self, color1, color2):
self.svg_ml = '<g><path style="fill:' + color1 + '" d="M314.6,158.88c-3.15-2.43-6.08-4.44-8.7-6.8c-1.96-1.77-3.67-3.87-5.2-6.03c-4.79-6.77-9.24-13.79-14.2-20.43c-3.74-5.01-8.47-9.09-14.29-11.7c-4.42-1.98-8.99-2.54-13.8-1.73c-3.2,0.54-6.52,0.45-9.68,1.14c-4.39,0.96-8.68,2.39-13.01,3.63c-0.36,0.1-0.72,0.24-1.19,0.4c-0.35-0.85-0.56-1.72-1.02-2.44c-1.32-2.07-2.92-3.99-4.08-6.14c-1.14-2.11-1.58-4.61-2.84-6.62c-3.11-4.96-7.04-9.27-11.41-13.17c-0.94-0.84-1.91-1.65-2.99-2.58c3.85-5.42,7.25-11.15,12.68-15.24c3.75-2.83,8-3,12.38-2.41c6.68,0.9,12.9,3.5,19.35,5.27c16.52,4.54,28.71,14.83,38.35,28.5c8.96,12.7,15.45,26.57,18.66,41.83c0.79,3.78,0.96,7.69,1.33,11.54C315.02,156.75,314.75,157.65,314.6,158.88z"/><path style="fill:' + color1 + '" d="M80.2,117.17c-3-0.94-6.09-2.12-9.28-2.84c-3.68-0.82-7.45-1.3-11.19-1.79c-2.79-0.36-5.61-0.76-8.4-0.68c-6.46,0.18-11.83,3.14-16.62,7.24c-5.22,4.47-9.28,9.91-13.1,15.56c-1.85,2.73-3.75,5.45-5.34,8.34c-3.15,5.73-7.66,10.09-13.04,13.66c-0.91,0.61-1.78,1.28-2.93,2.11c-0.12-1.31-0.31-2.39-0.29-3.47c0.13-7.3,1.73-14.34,4.06-21.21c4.42-13.06,10.65-25.18,19.26-36.01c9.35-11.76,20.88-20.32,35.59-24.24c5.67-1.51,11.24-3.38,16.93-4.77c7.33-1.8,13.69-0.02,18.52,6.03c2.9,3.63,5.67,7.37,8.56,11.14c-1.11,0.97-2.02,1.75-2.92,2.54c-4.45,3.89-8.32,8.3-11.59,13.22c-0.8,1.2-1.56,2.52-1.91,3.9c-0.5,1.94-1.56,3.43-2.7,5.01C82.4,112.84,81.39,115.06,80.2,117.17z"/><path style="fill:' + color1 + '" d="M135.67,71.26c-0.29-2.3-0.59-4.9-0.96-7.48c-0.08-0.53-0.32-1.17-0.71-1.51c-7.18-6.34-8.67-14.25-6.41-23.18c0.96-3.79,2.57-7.31,4.62-10.63c0.3-0.49,0.66-0.99,0.79-1.54c1.83-7.37,6.14-13.19,11.8-17.96c3.8-3.2,8.08-5.84,12.19-8.68c0.39-0.27,1.17-0.39,1.54-0.18c8.43,4.68,15.99,10.41,20.94,18.87c1.59,2.71,2.36,5.9,3.49,8.87c0.36,0.94,0.61,1.93,1.07,2.82c2.95,5.7,4.84,11.63,4.14,18.16c-0.57,5.26-2.98,9.55-6.83,13.12c-0.52,0.48-0.97,1.23-1.08,1.92c-0.41,2.48-0.67,4.99-0.98,7.45C164.8,67.76,150.23,67.84,135.67,71.26z"/><path style="fill:' + color1 + '" 
d="M216.62,227.43c4.34,1.87,8.23,4.3,11.83,7.21c3.77,3.04,6.91,6.65,9.64,10.65c2.95,4.33,6.14,8.5,9.13,12.81c2.72,3.92,5.12,8.02,6.09,12.79c1.03,5.04-0.03,7.86-4.5,10.42c-4.6,2.63-7.71,6.39-9.74,11.19c-1.28,3.04-2.61,6.06-4.02,9.04c-0.55,1.17-1.25,2.31-2.09,3.3c-1.14,1.35-2.52,1.45-3.61,0.06c-1.45-1.85-2.79-3.86-3.8-5.98c-2.84-6.01-5.42-12.13-8.2-18.16c-5.16-11.19-11.11-21.9-19.35-31.38C205.26,242.97,211.3,235.56,216.62,227.43z"/><path style="fill:' + color1 + '" d="M98.29,227.33c5.29,8.15,11.33,15.53,18.49,21.91c-2.2,2.91-4.46,5.68-6.49,8.61c-7.15,10.3-12.34,21.62-17.3,33.07c-1.52,3.51-3.18,6.97-4.89,10.39c-0.62,1.24-1.47,2.41-2.41,3.44c-1.27,1.38-2.67,1.3-3.85-0.16c-0.78-0.97-1.44-2.08-1.96-3.21c-1.47-3.16-2.86-6.36-4.27-9.56c-1.91-4.34-4.69-7.86-8.92-10.22c-4.6-2.56-6.57-4.87-4.64-12.39c1.01-3.95,3.07-7.43,5.38-10.74c3.74-5.37,7.5-10.75,11.41-16C83.87,235.7,90.5,230.89,98.29,227.33z"/></g><g><path style="fill:' + color2 + '" d="M236.76,148.11c-0.06,3.03,0.29,6.51-1.23,9.77c-0.08,0.16-0.12,0.39-0.08,0.56c1.37,5.71-0.38,11.2-1.42,16.68c-1.97,10.37-5.21,20.38-9.52,30.02c-0.81,1.82-2.2,3.39-3.29,5.09c-0.32,0.5-0.6,1.06-0.78,1.62c-0.49,1.51-0.67,3.18-1.41,4.55c-6.09,11.27-13.44,21.59-23.14,30.08c-1.51,1.33-3.35,2.28-5.05,3.4c-0.2,0.13-0.48,0.2-0.62,0.38c-3.19,4.1-7.89,5.94-12.39,7.9c-6.35,2.77-12.88,5.13-19.35,7.63c-0.5,0.19-1.16,0.27-1.65,0.11c-8.79-2.97-17.44-6.27-25.66-10.61c-2.37-1.25-4.68-2.6-6.28-4.85c-0.11-0.15-0.26-0.31-0.43-0.39c-5.89-2.79-9.95-7.68-14.12-12.4c-5.59-6.32-10.13-13.38-14.22-20.74c-0.02-0.04-0.05-0.08-0.07-0.13c-1.62-2.58-1.39-5.82-3.66-8.35c-1.95-2.18-2.84-5.35-4.01-8.16c-1.19-2.86-2.2-5.8-3.16-8.75c-0.95-2.9-1.8-5.84-2.57-8.79c-0.78-3-1.5-6.03-2.06-9.08c-0.64-3.48-1.32-6.98-1.52-10.5c-0.17-2.99-0.32-5.96-0.55-8.95c-1.06-13.31,1.04-26.23,6.41-38.51c0.9-2.06,3.22-3.52,4.03-5.59c0.82-2.08,1.07-4.4,2.39-6.32c6.77-9.86,15.55-17.44,26.29-22.68c3.56-1.74,7.3-3.16,11.05-4.46c1.59-0.55,3.43-0.41,5.15-0.58c0.47-0.05,1.03,0.04,1.39-0.19c3.7-2.41,7.95-2.7
5,12.14-3.17c8.69-0.87,17.35-0.53,26.01,0.71c3.26,0.47,5.8,2.7,9.34,2.74c3.16,0.03,6.39,1.43,9.44,2.61c3.06,1.19,6.03,2.7,8.87,4.34c2.85,1.65,5.6,3.5,8.23,5.49c5.54,4.19,10.31,9.18,14.11,15c1.13,1.73,1.59,3.9,2.37,5.86c0.19,0.47,0.36,1.02,0.71,1.35c1.96,1.83,3.31,4.02,4.31,6.49C234.44,126.42,236.5,136.77,236.76,148.11z M229.76,148.14c-0.29-9.53-1.77-18.24-4.84-26.67c-1.03-2.83-2.5-5.2-4.94-7.02c-0.52-0.38-0.91-1.25-0.93-1.92c-0.09-2.26-0.74-4.22-2.1-6.07c-8.06-10.96-18.69-18.33-31.48-22.78c-1.76-0.61-3.47-0.87-5.27-0.35c-1.06,0.31-1.91-0.05-2.86-0.56c-1.78-0.94-3.61-2.04-5.54-2.38c-7.99-1.4-16.05-1.46-24.12-0.67c-3.84,0.38-7.62,0.89-10.81,3.42c-0.45,0.36-1.36,0.18-2.06,0.19c-1.39,0.02-2.88-0.34-4.14,0.06c-13.4,4.27-24.46,11.86-32.78,23.29c-1.23,1.69-1.96,3.47-1.88,5.53c0.05,1.16-0.42,1.9-1.31,2.54c-2.06,1.49-3.42,3.47-4.33,5.88c-3.74,9.85-5.25,20.05-5.17,30.53c0.01,1.89,0.38,3.62,1.27,5.31c0.33,0.63,0.25,1.54,0.19,2.31c-0.13,1.63-0.71,3.28-0.53,4.86c1.56,13.04,5.11,25.55,10.18,37.66c0.9,2.16,2.07,4.06,3.96,5.46c0.88,0.65,1.06,1.49,1.17,2.5c0.16,1.42,0.22,2.99,0.87,4.2c4.45,8.29,9.74,16.01,16.25,22.83c2.01,2.1,4.03,4.3,6.42,5.9c2.43,1.62,4.6,3.53,6.93,5.2c7.06,5.08,15.11,7.88,23.89,8.25c8.32,0.34,16.06-1.79,23.33-5.73c2.47-1.34,5.01-2.61,6.5-5.21c0.23-0.4,0.85-0.58,1.3-0.85c1.52-0.92,3.23-1.63,4.54-2.79c8.25-7.28,14.62-16.05,20.12-25.51c1.2-2.06,2.07-4.12,2.06-6.56c0-0.74,0.52-1.66,1.09-2.18c1.6-1.46,2.88-3.07,3.75-5.1c4.6-10.86,7.93-22.08,9.8-33.73c0.53-3.29,1.31-6.61-0.17-9.89c-0.13-0.29-0.06-0.82,0.13-1.1C230.14,154.08,229.76,150.78,229.76,148.14z"/><path style="fill:' + color2 + '" 
d="M157.48,130.1c-3.64,0-7.29,0-10.93,0c-2.59,0-4.85-0.85-6.8-2.53c-4.53-3.89-9.05-7.8-13.56-11.71c-2.14-1.85-2.48-3.76-1.07-6.2c4.17-7.22,8.38-14.43,12.54-21.66c1.82-3.17,4.76-4.16,8.14-4.55c8.39-0.99,16.76-0.99,25.12,0.27c2.84,0.43,4.97,1.82,6.4,4.29c4.17,7.17,8.34,14.34,12.49,21.52c1.49,2.57,1.15,4.45-1.13,6.43c-4.48,3.88-8.96,7.75-13.46,11.6c-1.95,1.67-4.22,2.53-6.8,2.53C164.76,130.09,161.12,130.1,157.48,130.1z"/><path style="fill:' + color2 + '" d="M157.54,133.6c3.26,0,6.51,0,9.77,0c2.84-0.01,5.19,1.03,7.02,3.23c4.17,5,8.37,9.97,12.54,14.97c1.84,2.21,1.78,4.11-0.16,6.23c-4.66,5.09-9.33,10.16-14,15.24c-1.91,2.08-4.27,3.1-7.11,3.09c-5.44-0.03-10.89-0.02-16.33,0c-2.73,0.01-5.03-0.96-6.88-2.95c-4.75-5.14-9.48-10.29-14.2-15.45c-1.83-2-1.91-3.94-0.2-6c4.25-5.12,8.53-10.21,12.82-15.31c1.73-2.06,4-3.02,6.67-3.03C150.83,133.58,154.18,133.6,157.54,133.6z"/><path style="fill:' + color2 + '" d="M106,204.42c-3.22,0.19-4.86-0.81-6.14-3.76c-5.12-11.87-8.59-24.19-10.28-37.01c-0.42-3.22,0.64-4.64,3.92-4.92c8.27-0.73,16.55-1.43,24.82-2.06c2.22-0.17,4.28,0.57,5.81,2.21c5.17,5.53,10.29,11.11,15.39,16.71c1.5,1.65,1.67,3.79,0.45,5.65c-4.05,6.22-8.12,12.43-12.26,18.59c-1.41,2.1-3.55,3.12-6.05,3.33C116.46,203.59,111.23,204,106,204.42z"/><path style="fill:' + color2 + '" d="M208.81,204.41c-5.23-0.42-10.46-0.81-15.68-1.27c-2.67-0.24-4.76-1.5-6.25-3.77c-3.88-5.93-7.79-11.83-11.68-17.75c-1.5-2.29-1.34-4.35,0.49-6.36c4.89-5.33,9.78-10.66,14.69-15.97c1.87-2.03,4.21-2.82,6.95-2.58c7.59,0.66,15.19,1.28,22.79,1.92c0.58,0.05,1.16,0.07,1.74,0.15c2.75,0.36,3.88,1.71,3.53,4.43c-1.67,13.22-5.27,25.9-10.61,38.1C213.75,203.67,211.89,204.59,208.81,204.41z"/><path style="fill:' + color2 + '" 
d="M157.63,179.84c2.67,0,5.35,0.01,8.02,0c2.75-0.02,4.88,1.11,6.38,3.39c3.88,5.87,7.75,11.75,11.61,17.63c1.59,2.43,1.29,4.33-0.95,6.26c-3.72,3.2-7.43,6.4-11.15,9.6c-2.03,1.74-4.37,2.65-7.06,2.65c-4.66-0.01-9.33-0.01-13.99,0c-2.69,0.01-5.03-0.91-7.06-2.65c-3.76-3.23-7.51-6.46-11.27-9.69c-2.07-1.79-2.39-3.76-0.9-6.04c3.91-5.97,7.83-11.93,11.77-17.87c1.45-2.18,3.53-3.26,6.15-3.27C151.99,179.84,154.81,179.84,157.63,179.84z"/><path style="fill:' + color2 + '" d="M88.71,147.38c0.3-9,1.86-17.74,5.18-26.14c1.1-2.79,3.08-4.27,6.02-4.52c5.33-0.44,10.66-0.85,15.98-1.29c2.66-0.22,4.98,0.57,6.99,2.3c4.6,3.96,9.21,7.91,13.8,11.89c2.09,1.82,2.24,3.67,0.44,5.83c-3.98,4.78-7.97,9.54-11.98,14.29c-1.65,1.95-3.81,3.01-6.32,3.23c-8.62,0.74-17.23,1.47-25.86,2.13c-3.29,0.25-4.2-0.68-4.26-3.95C88.69,149.91,88.71,148.65,88.71,147.38z"/><path style="fill:' + color2 + '" d="M226.24,147.48c0,1.5,0.03,3.01-0.01,4.51c-0.05,2.15-1.11,3.24-3.3,3.15c-3.1-0.14-6.2-0.45-9.29-0.7c-5.47-0.45-10.93-0.97-16.4-1.33c-3.29-0.22-5.94-1.44-8.05-4.03c-3.74-4.59-7.6-9.08-11.39-13.63c-1.77-2.12-1.62-4,0.48-5.82c4.62-4.01,9.25-8,13.9-11.97c1.89-1.62,4.1-2.41,6.58-2.22c5.47,0.41,10.94,0.85,16.4,1.32c2.88,0.25,4.81,1.75,5.89,4.47C224.4,129.66,225.96,138.44,226.24,147.48z"/><path style="fill:' + color2 + '" d="M157.59,222.86c1.85,0,3.69-0.02,5.54,0.01c2.72,0.04,4.99,1.05,6.75,3.16c3.77,4.51,7.55,9,11.32,13.5c1.95,2.33,1.64,4.08-0.98,5.71c-7.56,4.69-15.69,7.37-24.68,6.92c-7.91-0.4-15.03-3.18-21.62-7.43c-1.78-1.15-1.9-3.1-0.39-4.92c3.38-4.07,6.95-7.99,10.15-12.2c2.66-3.51,5.91-5.16,10.27-4.76C155.16,222.96,156.38,222.86,157.59,222.86z"/><path style="fill:' + color2 + '" d="M128.06,240.6c-2.57-2.48-5.07-4.69-7.35-7.11c-6.06-6.43-10.98-13.69-15.24-21.41c-1.17-2.12-0.51-3.52,1.89-3.75c5.17-0.5,10.35-0.95,15.53-1.3c2.36-0.16,4.51,0.64,6.32,2.18c3.92,3.34,7.82,6.69,11.71,10.06c1.92,1.66,2.15,3.62,0.59,5.49c-3.79,4.55-7.62,9.07-11.45,13.59C129.51,239.02,128.91,239.65,128.06,240.6z"/><path style="fill:' + color2 + '" 
d="M186.62,240.31c-1.96-2.27-3.86-4.46-5.74-6.68c-2.39-2.81-4.74-5.65-7.12-8.47c-1.91-2.27-1.79-4.1,0.44-6.02c3.79-3.27,7.59-6.52,11.38-9.78c1.97-1.7,4.27-2.51,6.85-2.32c4.94,0.36,9.88,0.78,14.81,1.24c2.86,0.27,3.5,1.6,2.09,4.13c-5.42,9.67-11.8,18.59-20.1,26.04c-0.47,0.42-0.97,0.8-1.48,1.18C187.47,239.82,187.17,239.98,186.62,240.31z"/><path style="fill:' + color2 + '" d="M101.84,112.49c-0.68-0.29-1.79-0.45-1.93-0.9c-0.2-0.69,0.06-1.73,0.5-2.36c7.73-11.07,18.29-18.28,31.01-22.48c0.57-0.19,1.29,0.04,1.93,0.07c-0.09,0.67,0.02,1.46-0.29,2c-3.58,6.28-7.19,12.53-10.86,18.76c-1.17,2-3.03,3.19-5.32,3.42c-4.98,0.49-9.96,0.85-14.95,1.27C101.9,112.35,101.87,112.42,101.84,112.49z"/><path style="fill:' + color2 + '" d="M212.92,112.26c-4.89-0.4-9.78-0.76-14.66-1.22c-2.52-0.24-4.45-1.57-5.73-3.77c-3.5-6.04-7.01-12.08-10.51-18.12c-0.12-0.21-0.3-0.41-0.34-0.64c-0.08-0.54-0.32-1.29-0.06-1.59c0.27-0.31,1.1-0.39,1.58-0.24c2.11,0.7,4.22,1.44,6.27,2.33c10.06,4.36,18.43,10.87,24.82,19.83c0.11,0.16,0.28,0.3,0.33,0.48c0.2,0.76,0.71,1.69,0.45,2.23c-0.26,0.53-1.32,0.67-2.04,0.98C213,112.43,212.96,112.35,212.92,112.26z"/></g>'
def __str__(self):
return '<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 314.95 305.89" style="enable-background:new 0 0 314.95 305.89;" xml:space="preserve">' + self.svg_ml + "</svg>"
| [
"jacob.hokanson9@gmail.com"
] | jacob.hokanson9@gmail.com |
93608c1c11db74a6723a01767666485d2a5479a0 | a7f8417ec32e03a8af39ad772e4bf51ab13f92c8 | /run_cases/RunAll.py | bc6bccc80a60aff1dfa4d492dab4da883145120b | [
"Apache-2.0"
] | permissive | 18280108415/Interface | 07d0ae42b3d89799f8a64df5ba0da9be7e36c913 | e7362b2ce21377f0383aeb2cbf8ac40dc4e076c3 | refs/heads/main | 2023-04-21T01:19:41.342189 | 2021-05-13T08:29:14 | 2021-05-13T08:40:07 | 366,947,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@version: 1.0
@author: fky
@site:
@software: PyCharm
@file: run_main.py
@time: 2018/3/16 10:58
"""
import unittest
import time
import os
from Common.HTMLTestRunner_jpg import HTMLTestRunner
def run_case(dir="testCases"):
    """Discover all test*.py cases under <parent-of-cwd>/<dir>.

    Returns the discovered unittest.TestSuite.  The parameter keeps its
    original name ("dir") for backward compatibility with keyword callers,
    even though it shadows the builtin.
    """
    # os.path.join is portable; the old "\\" string concatenation only
    # produced a valid path on Windows.  The unused TestSuite() instance
    # and the debug print are removed.
    case_dir = os.path.join(os.path.dirname(os.getcwd()), dir)
    return unittest.defaultTestLoader.discover(case_dir, pattern="test*.py", top_level_dir=None)
if __name__ == '__main__':
    # Timestamped report name so each run produces its own HTML file.
    current_time = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
    report_path = os.path.dirname(os.getcwd()) + "\\report\\" + current_time + '.html' # path where the generated HTML test report is written
    fp = open(report_path, "wb")
    # NOTE(review): fp is not closed if runner.run() raises; a with-block
    # would be safer.
    runner = HTMLTestRunner(stream=fp, title=u"TestReport", description=u'interface test report',verbosity=2)
    runner.run(run_case())
    fp.close()
"523289267@qq.com"
] | 523289267@qq.com |
fa2e81222bbf7aadd99e6336533fca446bfa4426 | dbafa9b570e9ae62a5c6eb3afa012ef1a53b51a3 | /apps/wish_list/urls.py | afb6d40723806b061d7c6f489482759d70d76966 | [] | no_license | dpm530/deploy_test | 96d2a80691b17258063256bdfe7a7f72199bcf77 | d93089c236b5ca51c9623c9caee3237a81d3132f | refs/heads/master | 2021-01-01T18:57:45.424985 | 2017-07-26T22:51:23 | 2017-07-26T22:51:23 | 98,471,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from django.conf.urls import url
from . import views
urlpatterns=[
url(r'^$',views.index),
url(r'^register$',views.register),
url(r'^homePage$',views.homePage),
url(r'^login$',views.login),
url(r'^createWishListPage$',views.createWishListPage),
url(r'^createNewListItem$',views.createNewListItem),
url(r'^deleteItem/(?P<item_id>\d+)$',views.deleteItem),
url(r'^listItem/(?P<id>\d+)$',views.listItem),
]
| [
"Daniel@Daniels-MacBook-Pro.local"
] | Daniel@Daniels-MacBook-Pro.local |
e511c1dde4811ab32f8e66df43d7bbdea1921b75 | 4c401336430f203a140645c9af4593465cbce3ef | /teachme-rest-api/app/tests/test_school_level.py | 13fc8e3b23118ed22fe874a3720ab373731226fe | [
"Apache-2.0"
] | permissive | kiditz/teachme | f3b1d7cc25a2e114a1fca7a92e6786f9993c6f4a | b94852778ef1f3d7c98c45e34e5b6d048aca7c75 | refs/heads/master | 2022-12-17T10:35:48.574296 | 2018-08-21T15:53:36 | 2018-08-21T15:53:36 | 135,743,753 | 0 | 0 | Apache-2.0 | 2022-12-08T02:25:10 | 2018-06-01T17:05:42 | Roff | UTF-8 | Python | false | false | 543 | py | import unittest
import requests
from slerp.logger import logging
log = logging.getLogger(__name__)
class TestSchoolLevel(unittest.TestCase):
    """Integration test for the get_school_level endpoint (needs the
    service running locally on port 5002)."""

    BASE_URL = "http://localhost:5002/"

    def test_get_school_level(self):
        """GET get_school_level should answer HTTP 200 with status 'OK'."""
        # TODO : Replace All Input GET
        result = requests.get(self.BASE_URL + 'get_school_level', params={})
        log.info('test_get_school_level: %s', result.json())
        # Bug fix: assertIs compares object identity and only passed before
        # because CPython caches small integers; assertEqual is the correct
        # check for a status code.
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.json()['status'], 'OK')
if __name__ == '__main__':
    # Allow running this integration-test module directly.
    unittest.main()
| [
"kiditzbastara@gmail.com"
] | kiditzbastara@gmail.com |
f9ce555501e5c966e14dec0525a33a283154e3bb | 680c779e92208bfe4297760ba8568e0e90dc531d | /pathfinding.py | 8de12c0ae6b4955c4ae68eb6379c3db4f1bf4ae0 | [] | no_license | Lfcamacho/Pathfinding_Visualization | 4bdce6fbebb578beda10c2b978204d8205ac383e | 40e54e5cab65975d2d06ee70a6975e6fd0f0f61a | refs/heads/master | 2022-12-05T17:32:56.480013 | 2020-08-21T22:25:41 | 2020-08-21T22:25:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,662 | py | import pygame
from queue import PriorityQueue
import math
# Settings
SQUARE_SIZE = 15            # pixel size of one grid cell
COLUMNS = 50
ROWS = 40
GRID_POS_X = 0              # top-left corner of the grid inside the window
GRID_POS_Y = 0
GRID_WIDTH = COLUMNS * SQUARE_SIZE
GRID_HEIGHT = ROWS * SQUARE_SIZE
WIDTH = GRID_WIDTH
HEIGHT = GRID_HEIGHT
# Module-level side effect: importing this file opens the pygame window.
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Path Finding")
# Colors (RGB)
WHITE = [255,255,255]
BLACK = [0,0,0]
RED = [255,0,0]
GREEN = [0,255,0]
BLUE = [0,0,255]
PURPLE = [128,0,128]
# Fonts
pygame.font.init()
NUMBER_FONT = pygame.font.SysFont("comicsans", 35)
TITLE_FONT = pygame.font.SysFont("comicsans", 50)
BUTTON_FONT = pygame.font.SysFont("comicsans", 20)
class Board:
    """Grid of Square cells plus drawing and hit-testing helpers."""

    def __init__(self, rows, columns):
        self.rows = rows
        self.columns = columns
        self.create_grid()

    def create_grid(self):
        """(Re)build the 2-D row-major list of Square objects.

        The old version maintained x/y counters that were incremented but
        never read; a nested comprehension expresses the same grid.
        """
        self.grid = [[Square(i, j, SQUARE_SIZE) for j in range(self.columns)]
                     for i in range(self.rows)]

    def draw_grid(self, win):
        """Draw the thin grid lines on top of the squares."""
        thickness = 1
        # Vertical lines, one per column.
        for i in range(self.columns):
            pygame.draw.line(win, BLACK, (i * SQUARE_SIZE + GRID_POS_X, GRID_POS_Y), (i * SQUARE_SIZE + GRID_POS_X, GRID_POS_Y + GRID_HEIGHT), thickness)
        # Horizontal lines, one per row.
        for i in range(self.rows):
            pygame.draw.line(win, BLACK, (GRID_POS_X, i * SQUARE_SIZE + GRID_POS_Y), (GRID_POS_X + GRID_WIDTH, i * SQUARE_SIZE + GRID_POS_Y), thickness)

    def draw_squares(self, win):
        """Fill every cell with its current color."""
        # Iterate the rows directly instead of double-indexing by i/j.
        for row in self.grid:
            for square in row:
                square.draw_square(win)

    def get_grid_position(self, pos):
        """Convert a pixel position (x, y) into (row, col) grid coordinates."""
        col = int((pos[0] - GRID_POS_X) // SQUARE_SIZE)
        row = int((pos[1] - GRID_POS_Y) // SQUARE_SIZE)
        return row, col

    def create_neighbors(self):
        """Populate each square's neighbor list (call after obstacles are placed)."""
        for row in self.grid:
            for square in row:
                square.update_neighbors(self.grid)
class Square:
    """One grid cell: pixel position, render color, and A*-style bookkeeping."""

    def __init__(self, row, col, size):
        self.x = col * size + GRID_POS_X
        self.y = row * size + GRID_POS_Y
        self.row = row
        self.col = col
        self.size = size
        self.color = WHITE
        self.obstacle = False
        self.neighbors = []
        self.visited = False
        self.parent = None              # previous square on the best known path
        self.f_score = float("inf")     # g + heuristic; inf until first reached
        self.g_score = 0

    def draw_square(self, win):
        # Paint the cell as a filled rectangle in its current color.
        pygame.draw.rect(win, self.color, (self.x, self.y, self.size, self.size))

    def __lt__ (self, other):
        # PriorityQueue tie-breaker: when two queue entries share the same
        # numeric priority prefix, comparing squares must not raise.
        return False

    def make_start(self):
        self.color = BLUE

    def make_end(self):
        self.color = BLUE

    def make_obstacle(self):
        self.color = BLACK
        self.obstacle = True

    def make_open(self):
        # Green marks squares queued for exploration.
        self.color = GREEN

    def make_visited(self):
        self.color = RED
        self.visited = True

    def make_path(self):
        # Purple marks squares on the reconstructed final path.
        self.color = PURPLE

    def reset(self):
        self.color = WHITE
        self.obstacle = False

    def update_neighbors(self, grid):
        # 8-connected neighborhood: the -1..1 offsets include diagonals.
        for i in range(-1, 2):
            for j in range(-1, 2):
                if self.valid_neighbor(grid, self.row + i, self.col + j):
                    self.neighbors.append(grid[self.row + i][self.col + j])

    def valid_neighbor(self, grid, row, col):
        # A neighbor is valid when it is inside the grid, not this square
        # itself, and not an obstacle.
        if row >= 0 and row < len(grid) and col >= 0 and col < len(grid[0]):
            if self.row != row or self.col != col:
                if not grid[row][col].obstacle:
                    return True
        return False
def draw_window(board):
    """Repaint the whole frame: background, cell fills, grid lines, flip."""
    WIN.fill(WHITE)
    board.draw_squares(WIN)
    board.draw_grid(WIN)
    pygame.display.update()
def heuristic(p1, p2):
    """Euclidean distance between two (x, y) points."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return math.sqrt(dx * dx + dy * dy)
def find_path(board, start, end):
    """A*-style search from start to end, animating progress each step.

    Queue entries are (f_score, h_score, square) tuples; Square.__lt__
    breaks ties when the numeric prefix is equal.
    NOTE(review): if no path exists, PriorityQueue.get() blocks forever
    once the queue drains -- confirm inputs are always connected.
    """
    open_nodes = PriorityQueue()
    open_nodes.put((0, 0, start))
    while True:
        # Keep the window responsive while searching.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit()
        current = open_nodes.get()[2]
        if current == end:
            return True
        current.make_visited()
        if current == start:
            # Keep the start square visibly blue instead of the visited red.
            current.color = BLUE
        for node in current.neighbors:
            if not node.visited:
                h_score = heuristic((node.col, node.row), (end.col, end.row))
                g_score = current.g_score + heuristic((node.col, node.row), (current.col, current.row))
                f_score = h_score + g_score
                # Only requeue if this route improves the node's best score.
                if f_score < node.f_score:
                    node.make_open()
                    node.f_score = f_score
                    node.g_score = g_score
                    node.parent = current
                    open_nodes.put((f_score, h_score, node))
        draw_window(board)
def draw_path(board, start, end):
    """Walk parent links back from end and mark the intermediate squares.

    Neither start nor end itself is recolored; board is unused but kept
    for signature compatibility with the caller.
    """
    node = end.parent
    while node != start:
        node.make_path()
        node = node.parent
    return True
def main():
    """Event loop: place start/end/obstacles with the mouse, run the search
    on SPACE.  Left click places, right click erases."""
    run = True
    FPS = 60
    start = None
    end = None
    board = Board(ROWS, COLUMNS)
    clock = pygame.time.Clock()
    while run:
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit()
            # Left button: first click sets start, second sets end, further
            # clicks place obstacles.
            if pygame.mouse.get_pressed()[0]:
                mouse_pos = pygame.mouse.get_pos()
                row, col = board.get_grid_position(mouse_pos)
                square = board.grid[row][col]
                if not start and square != end:
                    square.make_start()
                    start = square
                if not end and square != start:
                    square.make_end()
                    end = square
                if square != start and square != end:
                    square.make_obstacle()
            # Right button: clear the clicked square (and forget start/end).
            if pygame.mouse.get_pressed()[2]:
                mouse_pos = pygame.mouse.get_pos()
                row, col = board.get_grid_position(mouse_pos)
                square = board.grid[row][col]
                square.reset()
                if square == start:
                    start = None
                if square == end:
                    end = None
            # SPACE: build neighbor lists, search, then draw the path.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    board.create_neighbors()
                    find_path(board, start, end)
                    draw_path(board, start, end)
        draw_window(board)
main()
"lfcamacho06@gmail.com"
] | lfcamacho06@gmail.com |
bee215f8c866ac4d57e174fc83c35868544aeea3 | 81c0dc9578112f5b370c23faa0126493dafdd490 | /swappi/db_schema.py | 441d98635e991d2717315692e5ced8f3f1fd6c44 | [
"MIT"
] | permissive | mhhoban/swappi-project | cc9eecd0ceb7e4736188fcb6c7ae1b38b3779837 | 6f11d04d63aa5ceeb826f2a7e0cb34a87aa71dfb | refs/heads/master | 2020-05-27T01:47:34.936250 | 2017-09-02T05:27:43 | 2017-09-02T05:27:43 | 82,513,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Users(Base):
    """Account that can post items in the catalog."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    email = Column(String(50), nullable=False)
class Categories(Base):
    """Item category used to group listings."""
    __tablename__ = 'categories'
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
class Items(Base):
    """Item offered for swapping, linked to its category and poster."""
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)
    # Category the item is listed under.
    category_id = Column(Integer, ForeignKey('categories.id'))
    category = relationship(Categories)
    title = Column(String(50), nullable=False)
    description = Column(String(250))
    # User who posted the item.
    poster_id = Column(Integer, ForeignKey('users.id'))
    poster = relationship(Users)
    # Free-text description of what the poster wants in exchange.
    swap_for = Column(String(100), nullable=False)
# Module-level side effect: importing this file creates the SQLite database
# file and all tables declared above (no-op when they already exist).
engine = create_engine('sqlite:///db/itemcatalog.db')
Base.metadata.create_all(engine)
"mhhoban@gmail.com"
] | mhhoban@gmail.com |
a01e65e47615d48efba8b8291cb3cecbb824f934 | d9c78cbb1824b494c213d4d1890434f110719e1c | /Dictionary/speed_Example.py | 32e32a5c2c39530e4384361996db884326f277a9 | [] | no_license | JoshMez/Python_Intro | 2fe9fc267b4aaaa17ddc578671e475425fd249c6 | 3687e96eec55e8f86916c28b2a37b282287ee409 | refs/heads/master | 2023-03-07T23:49:46.977806 | 2021-02-21T15:52:24 | 2021-02-21T15:52:24 | 292,469,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | #Aim, Track the position of a alien that can move at different speeds,
#
#
alien0 = {'x-position': 0,
'y-position' : 25,
'speed' : 'medium'
}
#Want the key value to be a variable.
origina_Pos = alien0['x-position']
#
#Original Position of the ship.
print(f"The original position of the ship is {origina_Pos}")
#
#
#ALien needs to move right.
#Depeding on speed, will determine the its increment size.
#
speed = alien0['speed']
#Using an if statment to test the speed.
#Speed tested will determing the increment.
if speed.title() == 'slow' or speed.upper() == 'slow' or speed == 'slow':
#Creating a variable called increment.
increment_x = 2
elif speed == 'medium':
increment_x = 3
else:
increment_x = 4
#Increment is add to the original position.
print(f"The new alien speed is {origina_Pos + increment_x}")
| [
"joshuamezieres10@gmail.com"
] | joshuamezieres10@gmail.com |
c11ee44633ac855b635d80da88d711d373e23c60 | 59886a1143cc4043b19e398fae1fddb5742b4b55 | /src/main/python/rlbot/agents/base_java_agent.py | 558144395485290f687591f9f3c43416c417fb28 | [
"MIT"
] | permissive | RLBot/RLBot | a6c4f502403f02822b3e4078b27583226584432e | c2f7c9a07911691b112b5338008e2ec932e7aee0 | refs/heads/master | 2023-08-16T06:04:35.384448 | 2023-07-01T11:21:26 | 2023-07-01T11:21:26 | 80,671,678 | 482 | 138 | MIT | 2023-07-01T11:21:28 | 2017-02-01T22:36:52 | Python | UTF-8 | Python | false | false | 542 | py | from rlbot.agents.base_independent_agent import BaseIndependentAgent
class BaseJavaAgent(BaseIndependentAgent):
    """Deprecated stub: Java bots must migrate to ExecutableWithSocketAgent."""

    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        # Fail fast at construction so the user sees the migration message
        # instead of a silently non-functional bot.
        raise NotImplementedError(
            f"Cannot run {name} because BaseJavaAgent is deprecated! "
            f"Please migrate to ExecutableWithSocketAgent! For more details see "
            f"https://github.com/RLBot/RLBotJavaExample/wiki/Py4j-Deprecation")

    def run_independently(self, terminate_request_event):
        # Never reached: __init__ always raises.
        pass
| [
"noreply@github.com"
] | noreply@github.com |
4aa619c5f0da271cf82f1c1c1edb77fb610b3181 | 4b17d98ad2a3ef018cfb33f7f1d645ede72eb808 | /models.py | 317ff17f52dc2e3d03d3556e07facbc26924d19b | [
"MIT"
] | permissive | poshan0126/Facial-Keypoint-Detection | 932ce0b85d7b1b0b893376537a5cf7c148704ee7 | fc52574b4c006e3afd86f209369e1a3e704a65fa | refs/heads/master | 2020-09-02T19:43:20.650541 | 2019-11-03T11:53:30 | 2019-11-03T11:53:30 | 219,292,492 | 0 | 0 | MIT | 2020-01-19T09:34:06 | 2019-11-03T11:47:10 | Jupyter Notebook | UTF-8 | Python | false | false | 3,030 | py | ## TODO: define the convolutional neural network architecture
num_output = 136 # As it's suggest final linear layer have to output 136 values, 2 for each of the 68 keypoint (x,y) pairs.
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
    """CNN that regresses facial keypoints from a grayscale image.

    Input:  a ``(batch, 1, 224, 224)`` tensor.
    Output: a ``(batch, num_output)`` tensor; with the default
    ``num_output=136`` this is one (x, y) pair for each of the 68 keypoints.
    """

    def __init__(self, num_output=136):
        """Build the conv/pool stack and the final regression layer.

        :param num_output: size of the final linear layer. Defaults to 136
            (2 coordinates for each of the 68 keypoints), matching the
            module-level ``num_output`` constant, so existing ``Net()``
            callers are unaffected.
        """
        super(Net, self).__init__()
        # Five 3x3 same-padding convolutions; three 2x2 max-pools halve the
        # 224x224 input three times: 224 -> 112 -> 56 -> 28.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=(3, 3), stride=1, padding=1)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=(3, 3), stride=1, padding=1)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=1, padding=1)
        self.conv5 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=1, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        # Final feature map is 28x28 with 128 channels.
        self.fc1 = nn.Linear(28 * 28 * 128, num_output)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """Forward pass; returns the keypoint regression tensor."""
        x = self.pool(F.relu(self.conv1(x)))
        x = F.relu(self.conv2(x))
        x = self.pool(F.relu(self.conv3(x)))
        x = F.relu(self.conv4(x))
        x = self.pool(F.relu(self.conv5(x)))
        # Flatten per sample; using x.size(0) instead of the hard-coded
        # 28*28*128 keeps this correct for any batch size.
        x = x.view(x.size(0), -1)
        # NOTE(review): ReLU + dropout on the *output* layer clamps
        # predictions to >= 0 and adds train-time noise; kept as in the
        # original design to preserve behavior.
        x = self.dropout(F.relu(self.fc1(x)))
        return x
| [
"aakrist666@gmail.com"
] | aakrist666@gmail.com |
7011576b08ff9e19cec9af1c4f2039410b117085 | d675bec53fe8ff470bf0287d64b418bfb4a2f93e | /tools/category_count.py | 53f447e01c48cc4460a56412fff3e1bd7c34b36c | [] | no_license | RolfSievert/litter-det | 0ba309fbc82b7e8de09bbc5d3dae2e471239a744 | c74b275a234eac5ebe26dcc9970cd1d1a3dceb6c | refs/heads/main | 2023-04-19T11:46:00.679523 | 2021-04-03T08:06:35 | 2021-04-03T08:06:35 | 364,611,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Prints and plots all class quantities.
TODO
* Add deviation of instances and images per category
"""
import copy
import json
import argparse
parser = argparse.ArgumentParser(description='User args')
parser.add_argument('annotations', help='Path to dataset annotations')
args = parser.parse_args()
# Load annotations
with open(args.annotations, 'r') as f:
    dataset = json.loads(f.read())
# Top-level sections of the annotation file.
anns = dataset['annotations']
scene_anns = dataset['scene_annotations']
imgs = dataset['images']
nr_images = len(imgs)
# Per-category and per-supercategory indexes; the 'images' lists are kept
# de-duplicated below while 'instances' records every annotation id.
categories = {}
super_categories = {}
for cat in dataset['categories']:
    categories[cat['id']] = {'name': cat['name'], 'supercategory': cat['supercategory'], 'images': [], 'instances': []}
    super_categories[cat['supercategory']] = {'name': cat['supercategory'], 'images': [], 'instances': []}
"""
1. Save data of categories
    * add image to set of category images
    * add instance to category
2. Save data of super categories
    * add image to set of super category images
    * add instance to super category
"""
for a in anns:
    a_id = a["id"]
    im_id = a["image_id"]
    c_id = a["category_id"]
    sup_cat = categories[c_id]['supercategory']
    # add to categories
    categories[c_id]['instances'].append(a_id)
    if im_id not in categories[c_id]['images']:
        categories[c_id]['images'].append(im_id)
    # add to super categories
    super_categories[sup_cat]['instances'].append(a_id)
    if im_id not in super_categories[sup_cat]['images']:
        super_categories[sup_cat]['images'].append(im_id)
def print_info(categories, super_categories):
    """Print per-category and per-supercategory image/instance counts.

    Works on deep copies, so the caller's dicts are left untouched; the
    'images'/'instances' lists are replaced by their lengths before printing.
    """
    def summarize(index):
        summary = copy.deepcopy(index)
        for info in summary.values():
            # Pop the raw id lists and keep only their sizes.
            info['image_count'] = len(info.pop('images'))
            info['instance_count'] = len(info.pop('instances'))
        return summary

    print(summarize(categories))
    print(summarize(super_categories))
print_info(categories, super_categories)
# Flatten the indexes into name -> count maps for reporting.
category_count = {v['name']: len(v['instances']) for k, v in categories.items()}
category_im_count = {v['name']: len(v['images']) for k, v in categories.items()}
super_category_count = {k: len(v['instances']) for k, v in super_categories.items()}
super_category_im_count = {k: len(v['images']) for k, v in super_categories.items()}
# sort dicts descending by instance count
category_count_sorted = sorted(category_count.items(), key=lambda item: item[1], reverse=True)
super_category_count_sorted = sorted(super_category_count.items(), key=lambda item: item[1], reverse=True)
sup_len = len(super_category_count)
sup_total = 0
print("format: (instance count, category name, image count)")
print(f"Super Categories: ({sup_len})")
# NOTE(review): the "median" below is the middle element of the descending
# list, and the summary line raises NameError/ZeroDivisionError when there
# are no categories -- acceptable for this one-off report script.
for i, (cat, count) in enumerate(super_category_count_sorted):
    print(f"\t {count}: \t {cat} ({super_category_im_count[cat]})")
    sup_total += count
    if i == sup_len//2:
        sup_median = count
print(f"Super category mean: {sup_total/sup_len}, median: {sup_median}")
cat_len = len(category_count)
cat_total = 0
print(f"Categories: ({cat_len})")
for i, (cat, count) in enumerate(category_count_sorted):
    print(f"\t {count}: \t {cat} ({category_im_count[cat]})")
    cat_total += count
    if i == cat_len//2:
        cat_median = count
print(f"Category mean: {cat_total/cat_len}, median: {cat_median}")
# plot image counts and instance counts per category id, saving each chart
import matplotlib.pyplot as plt
plt.style.use("seaborn-darkgrid")
plt.bar(categories.keys(), [len(c['images']) for c in categories.values()], label="Image count per category")
plt.ylabel("Quantity")
plt.xlabel("Category ID")
plt.legend()
plt.savefig("image_count.jpg", dpi=200, bbox_inches='tight')
plt.show()
plt.bar(categories.keys(), [len(c['instances']) for c in categories.values()], label='Instance count per category')
plt.ylabel("Quantity")
plt.xlabel("Category ID")
plt.legend()
plt.savefig("instance_count.jpg", dpi=200, bbox_inches='tight')
plt.show()
| [
"26115145+RolfSievert@users.noreply.github.com"
] | 26115145+RolfSievert@users.noreply.github.com |
6f11a24f41e110a714144a80ec485cedcf1efbb4 | 8e43e0d1d0f81a3a2a5e21137b43cc40bcd62a70 | /lib/cretonne/meta/isa/riscv/registers.py | f00c9a0f0d8579dfa8266d8b36f4fbe789528193 | [
"Apache-2.0"
] | permissive | pnkfelix/cretonne | 52c86cc1056d3d4b627ad8b256febab21cdf0e37 | f9af88c49e8c4da64d9bc1e30434aaf050b983df | refs/heads/master | 2020-06-11T05:57:49.068844 | 2016-12-08T23:57:28 | 2016-12-08T23:57:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | """
RISC-V register banks.
"""
from __future__ import absolute_import
from cdsl.registers import RegBank, RegClass
from .defs import ISA
# We include `x0`, a.k.a `zero` in the register bank. It will be reserved.
IntRegs = RegBank(
'IntRegs', ISA,
'General purpose registers',
units=32, prefix='x')
FloatRegs = RegBank(
'FloatRegs', ISA,
'Floating point registers',
units=32, prefix='f')
GPR = RegClass('GPR', IntRegs)
FPR = RegClass('FPR', FloatRegs)
| [
"jolesen@mozilla.com"
] | jolesen@mozilla.com |
df91c9a9b9937a18b50fc7a7be16c73b905500d8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_true.py | ada6ea76ff35da675f31a995503e77327d2954a1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py |
#calss header
class _TRUE():
def __init__(self,):
self.name = "TRUE"
self.definitions = [u'to not be in the correct position or to be slightly bent out of the correct shape: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
92841d5104e532eefde1c249cb662386614e8381 | 30c83cb957cf0ed420cc59aeb8369236f68d010f | /10-1.py | 44e8dfcc770cd23226f4be9d8e0ae102545eb380 | [
"MIT"
] | permissive | AlanAu/Advent-of-Code-2016 | 7c753e1bb167eff564181eecb9ae13fed8bbdc4c | 842cb49b1a6ef650e73501fc747bc92c8f9fc860 | refs/heads/master | 2021-01-13T14:52:09.001824 | 2017-12-20T19:24:33 | 2017-12-20T19:24:33 | 76,512,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,020 | py | '''
--- Day 10: Balance botValues ---
You come upon a factory in which many robots are zooming around handing small microchips to each other.
Upon closer examination, you notice that each bot only proceeds when it has two microchips, and once it does, it gives each one to a different bot or puts it in a marked "output" bin. Sometimes, botValues take microchips from "input" bins, too.
Inspecting one of the microchips, it seems like they each contain a single number; the botValues must use some logic to decide what to do with each chip. You access the local control computer and download the botValues' instructions (your puzzle input).
Some of the instructions specify that a specific-valued microchip should be given to a specific bot; the rest of the instructions indicate what a given bot should do with its lower-value or higher-value chip.
For example, consider the following instructions:
value 5 goes to bot 2
bot 2 gives low to bot 1 and high to bot 0
value 3 goes to bot 1
bot 1 gives low to output 1 and high to bot 0
bot 0 gives low to output 2 and high to output 0
value 2 goes to bot 2
Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and a value-5 chip.
Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher one (5) to bot 0.
Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the value-3 chip to bot 0.
Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2 microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot number 2 is responsible for comparing value-5 microchips with value-2 microchips.
Based on your instructions, what is the number of the bot that is responsible for comparing value-61 microchips with value-17 microchips?
input: str
output: int
'''
# Two handles over the same puzzle input: the first pass (inFile) records
# each bot's give-low/give-high rules, the second (valFile) replays the
# "value N goes to bot M" seeds once every rule is known.
inFile = open("10.txt",'r')
valFile = open("10.txt",'r')
#inFile = open("10a.txt",'r')
#valFile = open("10a.txt",'r')
# Target chip values for the puzzle answer (strings, matching the split
# input tokens they are compared against).
lChip = '17'
hChip = '61'
#lChip = 2
#hChip = 5
# botValues: "bot N"/"output N" -> list of chip values currently held.
# instructions: "bot N" -> [low-target, high-target].
botValues = {}
instructions = {}
def activate(target, value):
    """Hand `value` to `target`; once it holds two chips, dispatch them.

    Chips are sorted numerically, the holder is emptied, the puzzle answer
    is printed when the (lChip, hChip) pair is compared, and the low/high
    chips cascade recursively to the targets in `instructions`.
    """
    botValues.setdefault(target, []).append(value)
    chips = botValues[target]
    if len(chips) < 2:
        return
    lowVal, highVal = sorted(chips, key=int)
    lowBot, highBot = instructions[target]
    # Empty the holder before cascading so re-entry starts fresh.
    botValues[target] = []
    if (lowVal, highVal) == (lChip, hChip):
        print("Number of the bot which compares chips",highVal,"and",lowVal,"is:",target.split()[1])
    activate(lowBot, lowVal)
    activate(highBot, highVal)
# Pass 1: record every bot's routing rule before any chip moves.
# Line shape: "bot A gives low to <kind> B and high to <kind> C".
for _ in inFile:
    inst = _.split()
    if inst[0] == "bot":
        active = " ".join([inst[0],inst[1]])
        low = " ".join([inst[5],inst[6]])
        high = " ".join([inst[10],inst[11]])
        instructions[active] = [low,high]
# Pass 2: seed the initial chips ("value V goes to bot B"); activate()
# cascades all resulting handoffs and prints the answer when found.
for _ in valFile:
    inst = _.split()
    if inst[0] == "value":
        value = inst[1]
        target = " ".join([inst[4],inst[5]])
        activate(target,value)
"alan.au@gmail.com"
] | alan.au@gmail.com |
83d473d730c79476a469462dc89029202d29a28c | fe5ed850257cc8af4df10de5cffe89472eb7ae0b | /8.面对对象编程初探/面向对象程序学习/深入类和对象/魔术方法/魔法方法.py | a67e400ecfa57c124cd2bb3759d9d3d310f36d31 | [] | no_license | hujianli94/Python-code | a0e6fe6362868407f31f1daf9704063049042d9e | fe7fbf59f1bdcbb6ad95a199262dd967fb04846c | refs/heads/master | 2020-09-13T01:11:34.480999 | 2019-11-19T05:29:59 | 2019-11-19T05:29:59 | 222,614,662 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | #!/usr/bin/env python
#-*- coding:utf8 -*-
'''
和比较相关的魔术方法
__eq__(self, other) self == other
__ne__(self, other) self != other
__lt__(self, other) self < other
__gt__(self, other) self > other
__le__(self, other) self <= other
__ge__(self, other) self >= other
和数学相关的魔术方法
__add__(self, other) self + other
__sub__(self, other) self - other
__mul__(self, other) self * other
__floordiv__(self, other) self // other
__truediv__(self, other) self / other
__mod__(self, other) self % other
__pow__(self, other) self ** other
'''
class Word():
    """Wraps a value to demonstrate arithmetic/representation magic methods."""

    def __init__(self, text):
        # The wrapped value; despite the name, the demo below stores an int.
        self.text = text

    def __add__(self, other):
        """Return ``self.text + other`` when the wrapped value is an int.

        For any non-int wrapped value this returns False rather than
        raising, mirroring the original demo's behavior.
        """
        if isinstance(self.text, int):
            return self.text + other
        return False

    def __repr__(self):
        # Bug fix: the original returned the literal string
        # 'Word("self.text")' instead of interpolating the wrapped value.
        return 'Word("{}")'.format(self.text)
# Demo: wrap an int and exercise the magic methods directly.
first = Word(10)
# __eq__ is commented out on the class, so this falls back to
# object.__eq__ (identity); the returned value is discarded.
first.__eq__(10)
print(first)
# Equivalent to `first + 20`; returns 30 because the wrapped value is an int.
add = first.__add__(20)
print(add)
"1879324764@qq.com"
] | 1879324764@qq.com |
0e6f31180db674bc453059f0e48b119b4326f74b | 1ebca18e6279128d8a5ecce5a8789723902ef4e3 | /patient/migrations/0006_auto_20201114_0425.py | 75380186756bd4be2ee43dc93c4902c2d1c18dd4 | [] | no_license | tanjim17/HackTheVerse_Toddlers | 1fa9b9a5d1506661f6ae8ab192356d1bcf2f68bf | 71bfb71ddb84ea9e123592f1b06cfa9418a80ad7 | refs/heads/main | 2023-01-30T10:42:31.673528 | 2020-12-13T03:31:18 | 2020-12-13T03:31:18 | 312,544,228 | 1 | 0 | null | 2020-12-13T03:31:19 | 2020-11-13T10:27:55 | Python | UTF-8 | Python | false | false | 382 | py | # Generated by Django 2.0 on 2020-11-13 22:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patient', '0005_remove_patient_dischargedate'),
]
operations = [
migrations.AlterField(
model_name='patient',
name='age',
field=models.IntegerField(),
),
]
| [
"nazmultakbir98@gmail.com"
] | nazmultakbir98@gmail.com |
50db7d3cbbf9fa9c19ce0fb0431ea172406b3f3e | a9063fd669162d4ce0e1d6cd2e35974274851547 | /swagger_client/api/im_chat_api.py | 2f290030aa133f9b5950f05a99c87affd9397deb | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 28,116 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class IMChatApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def im_chat_messages(self, session_id, _from, to, **kwargs): # noqa: E501
"""Retrieve IM Chat Messages # noqa: E501
Retrieve IM chat messages for a specified period of time. <aside>Note: This API only supports oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_messages(session_id, _from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str session_id: IM chat session ID. (required)
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20021
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.im_chat_messages_with_http_info(session_id, _from, to, **kwargs) # noqa: E501
else:
(data) = self.im_chat_messages_with_http_info(session_id, _from, to, **kwargs) # noqa: E501
return data
def im_chat_messages_with_http_info(self, session_id, _from, to, **kwargs): # noqa: E501
"""Retrieve IM Chat Messages # noqa: E501
Retrieve IM chat messages for a specified period of time. <aside>Note: This API only supports oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_messages_with_http_info(session_id, _from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str session_id: IM chat session ID. (required)
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20021
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['session_id', '_from', 'to', 'page_size', 'next_page_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method im_chat_messages" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'session_id' is set
if ('session_id' not in params or
params['session_id'] is None):
raise ValueError("Missing the required parameter `session_id` when calling `im_chat_messages`") # noqa: E501
# verify the required parameter '_from' is set
if ('_from' not in params or
params['_from'] is None):
raise ValueError("Missing the required parameter `_from` when calling `im_chat_messages`") # noqa: E501
# verify the required parameter 'to' is set
if ('to' not in params or
params['to'] is None):
raise ValueError("Missing the required parameter `to` when calling `im_chat_messages`") # noqa: E501
collection_formats = {}
path_params = {}
if 'session_id' in params:
path_params['sessionId'] = params['session_id'] # noqa: E501
query_params = []
if '_from' in params:
query_params.append(('from', params['_from'])) # noqa: E501
if 'to' in params:
query_params.append(('to', params['to'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'next_page_token' in params:
query_params.append(('next_page_token', params['next_page_token'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/chat/sessions/{sessionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20021', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def im_chat_sessions(self, _from, to, **kwargs): # noqa: E501
"""Get IM Chat Sessions # noqa: E501
Retrieve IM Chat sessions for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br> **Scopes:** `imchat:read, imchat:read:admin`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Heavy` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_sessions(_from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20020
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.im_chat_sessions_with_http_info(_from, to, **kwargs) # noqa: E501
else:
(data) = self.im_chat_sessions_with_http_info(_from, to, **kwargs) # noqa: E501
return data
def im_chat_sessions_with_http_info(self, _from, to, **kwargs): # noqa: E501
"""Get IM Chat Sessions # noqa: E501
Retrieve IM Chat sessions for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br> **Scopes:** `imchat:read, imchat:read:admin`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Heavy` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.im_chat_sessions_with_http_info(_from, to, async_req=True)
>>> result = thread.get()
:param async_req bool
:param date _from: Start date in 'yyyy-mm-dd' format. The date range defined by the \"from\" and \"to\" parameters should only be one month as the report includes only one month worth of data at once. (required)
:param date to: End date. (required)
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20020
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_from', 'to', 'page_size', 'next_page_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method im_chat_sessions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter '_from' is set
if ('_from' not in params or
params['_from'] is None):
raise ValueError("Missing the required parameter `_from` when calling `im_chat_sessions`") # noqa: E501
# verify the required parameter 'to' is set
if ('to' not in params or
params['to'] is None):
raise ValueError("Missing the required parameter `to` when calling `im_chat_sessions`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if '_from' in params:
query_params.append(('from', params['_from'])) # noqa: E501
if 'to' in params:
query_params.append(('to', params['to'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'next_page_token' in params:
query_params.append(('next_page_token', params['next_page_token'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/chat/sessions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20020', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def listimmessages(self, user_id, **kwargs): # noqa: E501
"""Get User’s IM Messages # noqa: E501
Get IM Chat messages for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.listimmessages(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The user ID or email address. (required)
:param str chat_user: Chat user's ID or email address.
:param str channel: IM Channel's ID.
:param str _date: IM message's query date time, format as yyyy-MM-dd.
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20060
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.listimmessages_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.listimmessages_with_http_info(user_id, **kwargs) # noqa: E501
return data
def listimmessages_with_http_info(self, user_id, **kwargs): # noqa: E501
"""Get User’s IM Messages # noqa: E501
Get IM Chat messages for a specified period of time. <aside>Note: This API only supports Oauth2.</aside><br><br> **Scopes:** `imchat:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium`<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.listimmessages_with_http_info(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The user ID or email address. (required)
:param str chat_user: Chat user's ID or email address.
:param str channel: IM Channel's ID.
:param str _date: IM message's query date time, format as yyyy-MM-dd.
:param int page_size: The number of records returned within a single API call.
:param str next_page_token: The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.
:return: InlineResponse20060
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id', 'chat_user', 'channel', '_date', 'page_size', 'next_page_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method listimmessages" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `listimmessages`") # noqa: E501
collection_formats = {}
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id'] # noqa: E501
query_params = []
if 'chat_user' in params:
query_params.append(('chat_user', params['chat_user'])) # noqa: E501
if 'channel' in params:
query_params.append(('channel', params['channel'])) # noqa: E501
if '_date' in params:
query_params.append(('date', params['_date'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'next_page_token' in params:
query_params.append(('next_page_token', params['next_page_token'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/users/{userId}/chat/messages', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20060', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def sendimmessages(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body117 body:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.sendimmessages_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.sendimmessages_with_http_info(**kwargs) # noqa: E501
return data
def sendimmessages_with_http_info(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body117 body:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'chat_user'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method sendimmessages" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'chat_user' in params:
query_params.append(('chat_user', params['chat_user'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'message' in params:
form_params.append(('message', params['message'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/users/me/chat/messages', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20122', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def sendimmessages(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str message:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.sendimmessages_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.sendimmessages_with_http_info(**kwargs) # noqa: E501
return data
def sendimmessages_with_http_info(self, **kwargs): # noqa: E501
"""Send IM messages # noqa: E501
Send chat message to a user. <aside>Note: This API only supports OAuth 2.0.</aside><br><br>**Scope:** `imchat:write` **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Medium` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sendimmessages_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str message:
:param str chat_user: The email address (registered with Zoom) or the userId of the chat user.
:return: InlineResponse20122
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['message', 'chat_user'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method sendimmessages" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'chat_user' in params:
query_params.append(('chat_user', params['chat_user'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'message' in params:
form_params.append(('message', params['message'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth'] # noqa: E501
return self.api_client.call_api(
'/im/users/me/chat/messages', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20122', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"github@rootalley.com"
] | github@rootalley.com |
0a68a4633fb48f6f75bb6216cb5f7f38def50fc8 | 6afe7ba26ed50d158110874cf3f9767f6a108d18 | /task_1_5.py | 03772558d7760dd1355f0de36fadd79f33b015d0 | [] | no_license | hejaziak/Regular-expression-to-NFA | 9603ded3b1459b6b17de838c410c2d101fc3d779 | f959fb0da21d2896ebee857cd201bac48765f3c6 | refs/heads/master | 2020-05-02T19:17:46.777728 | 2019-03-28T09:53:12 | 2019-03-28T09:53:12 | 178,155,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | import argparse
import re
def task_1_5(args):
regex = re.compile("(?<==)\d+")
output_file = open("task_1_5_result.txt","w+")
with open(args.file+"/task1_5.txt", "r") as file:
for line in file:
matches = regex.findall(line)
if(matches):
for match in matches:
output_file.write(match+ "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=True, description='Sample Commandline')
parser.add_argument('--file', action="store", help="path of file to take as input", nargs="?", metavar="file")
args = parser.parse_args()
print(args.file)
task_1_5(args) | [
"atherkhalid158@gmail.com"
] | atherkhalid158@gmail.com |
ff58cb835d1dc807632b42b73c6859e646dc8101 | b4f56ec36f1ba1bfb6bc5f29d04a9be55d0796cc | /src/main/python/customer_pb2.py | 4b3800360cd09db8d25ff025e05fe9e2b2580836 | [] | no_license | flipsyde59/trrp_3 | 7590120763954c2083b14abe60f685f98f57ea55 | 6b7eab3b74588bb0bf8541ed5f80fb17bd6ceddb | refs/heads/master | 2023-02-20T18:25:38.431082 | 2021-01-23T19:56:26 | 2021-01-23T19:56:26 | 332,179,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 7,602 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: customer.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='customer.proto',
package='trrp_3',
syntax='proto3',
serialized_options=b'\n\006trrp_3B\016CustomerProtos',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0e\x63ustomer.proto\x12\x06trrp_3\"\xdf\x01\n\x08\x43ustomer\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x11\n\tfirstName\x18\x02 \x01(\t\x12\x10\n\x08lastName\x18\x03 \x01(\t\x12-\n\x06\x65mails\x18\x04 \x03(\x0b\x32\x1d.trrp_3.Customer.EmailAddress\x1aG\n\x0c\x45mailAddress\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12(\n\x04type\x18\x02 \x01(\x0e\x32\x1a.trrp_3.Customer.EmailType\"*\n\tEmailType\x12\x10\n\x0cPROFESSIONAL\x10\x00\x12\x0b\n\x07PRIVATE\x10\x01\"0\n\rListCustomers\x12\x1f\n\x05items\x18\x01 \x03(\x0b\x32\x10.trrp_3.CustomerB\x18\n\x06trrp_3B\x0e\x43ustomerProtosb\x06proto3'
)
_CUSTOMER_EMAILTYPE = _descriptor.EnumDescriptor(
name='EmailType',
full_name='trrp_3.Customer.EmailType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='PROFESSIONAL', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PRIVATE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=208,
serialized_end=250,
)
_sym_db.RegisterEnumDescriptor(_CUSTOMER_EMAILTYPE)
_CUSTOMER_EMAILADDRESS = _descriptor.Descriptor(
name='EmailAddress',
full_name='trrp_3.Customer.EmailAddress',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='email', full_name='trrp_3.Customer.EmailAddress.email', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='trrp_3.Customer.EmailAddress.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=206,
)
_CUSTOMER = _descriptor.Descriptor(
name='Customer',
full_name='trrp_3.Customer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='trrp_3.Customer.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='firstName', full_name='trrp_3.Customer.firstName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lastName', full_name='trrp_3.Customer.lastName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='emails', full_name='trrp_3.Customer.emails', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CUSTOMER_EMAILADDRESS, ],
enum_types=[
_CUSTOMER_EMAILTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=250,
)
_LISTCUSTOMERS = _descriptor.Descriptor(
name='ListCustomers',
full_name='trrp_3.ListCustomers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='trrp_3.ListCustomers.items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=252,
serialized_end=300,
)
_CUSTOMER_EMAILADDRESS.fields_by_name['type'].enum_type = _CUSTOMER_EMAILTYPE
_CUSTOMER_EMAILADDRESS.containing_type = _CUSTOMER
_CUSTOMER.fields_by_name['emails'].message_type = _CUSTOMER_EMAILADDRESS
_CUSTOMER_EMAILTYPE.containing_type = _CUSTOMER
_LISTCUSTOMERS.fields_by_name['items'].message_type = _CUSTOMER
DESCRIPTOR.message_types_by_name['Customer'] = _CUSTOMER
DESCRIPTOR.message_types_by_name['ListCustomers'] = _LISTCUSTOMERS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Customer = _reflection.GeneratedProtocolMessageType('Customer', (_message.Message,), {
'EmailAddress' : _reflection.GeneratedProtocolMessageType('EmailAddress', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMER_EMAILADDRESS,
'__module__' : 'customer_pb2'
# @@protoc_insertion_point(class_scope:trrp_3.Customer.EmailAddress)
})
,
'DESCRIPTOR' : _CUSTOMER,
'__module__' : 'customer_pb2'
# @@protoc_insertion_point(class_scope:trrp_3.Customer)
})
_sym_db.RegisterMessage(Customer)
_sym_db.RegisterMessage(Customer.EmailAddress)
ListCustomers = _reflection.GeneratedProtocolMessageType('ListCustomers', (_message.Message,), {
'DESCRIPTOR' : _LISTCUSTOMERS,
'__module__' : 'customer_pb2'
# @@protoc_insertion_point(class_scope:trrp_3.ListCustomers)
})
_sym_db.RegisterMessage(ListCustomers)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"flipsyde59@mail.ru"
] | flipsyde59@mail.ru |
fec187d97af48673db9a3cd1cb57dbaa81a53c2d | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2020/DeclarativeEnvironmentRecord.CreateImmutableBinding.spec | 32137e54076b90b69a23ec493062ece2b21f0272 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 398 | spec | 1. Let _envRec_ be the declarative Environment Record for which the method was invoked.
1. Assert: _envRec_ does not already have a binding for _N_.
1. Create an immutable binding in _envRec_ for _N_ and record that it is uninitialized. If _S_ is *true*, record that the newly created binding is a strict binding.
1. Return NormalCompletion(~empty~). | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
795c71f40d7e4b7b4ba2a1d84f255eb7b5f64b2d | faa965776fb422437332440a169d9980437e4fce | /text/cleaners.py | b2c8c9d1e2e3a65a3eb3e110beec2fb2eb299138 | [] | no_license | IMLHF/lpc-tracotron | 752ac707568098c870bf5db107dc9d184a7f853d | 5994f84bf828afe11da845fb5153080f673a653e | refs/heads/master | 2020-07-02T16:50:18.803338 | 2019-09-03T03:20:41 | 2019-09-03T03:20:41 | 201,594,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | '''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
# text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
#text = convert_to_ascii(text)
# text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
#text = convert_to_ascii(text)
# text = lowercase(text)
#text = expand_numbers(text)
#text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
| [
"red_wind@foxmail.com"
] | red_wind@foxmail.com |
e53399901313f79e9bf4ddfe04766f118f7b62e9 | dc2674334cec0ee2fe8643d012faa793f9e9aa15 | /ApiTestFramework/CodeGenerator.py | 83ff80ec12e637d024a356106a476982a564f324 | [] | no_license | seoktaehyeon/api-test-framework | 2ac13bcf0998709473e4afd55e0e318e5021bc87 | 3fb202785973ac8ca0544cba2fae3c9a5c90fc25 | refs/heads/master | 2020-07-02T06:32:12.128056 | 2019-12-23T12:01:55 | 2019-12-23T12:01:55 | 201,440,690 | 0 | 0 | null | 2019-12-23T08:19:30 | 2019-08-09T09:53:28 | Python | UTF-8 | Python | false | false | 2,303 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
class CodeGenerator(object):
def __init__(self):
pass
@staticmethod
def new_test_suite(suite_name, case_content):
return '\n'.join([
'#!/usr/bin/env python3',
'# -*- coding: utf-8 -*-',
'',
'from ApiTestFramework.CaseExecutor import CaseExecutor',
'',
'',
'class Test%s(object):' % suite_name,
'',
' def setup_class(self):',
' self.ce = CaseExecutor()',
' self.ce.setup_class()',
'',
' def teardown_class(self):',
' self.ce.teardown_class()',
'',
' def setup_method(self):',
' self.ce.setup_method()',
'',
' def teardown_method(self):',
' self.ce.teardown_method()',
case_content
])
@staticmethod
def append_test_case(case_content, test_function, test_suite, test_case, test_case_title):
return '\n'.join([
case_content,
' def test_%s(self):' % test_function,
' self.ce.get_test_case_requests(',
' test_suite=\'%s\',' % test_suite,
' test_case=\'%s\',' % test_case,
' test_case_title=\'%s\'' % test_case_title,
' )',
' for test_request_data in self.ce.test_requests_data:',
' self.ce.exec_test_case(test_request_data)',
'',
])
@staticmethod
def _setup_teardown(script_name):
return '\n'.join([
'#!/usr/bin/env python3',
'# -*- coding: utf-8 -*-',
'',
'',
'def run(test_env: dict):',
' # This is a %s script' % script_name,
' pass',
''
])
def new_setup_suite(self):
return self._setup_teardown('suite Setup')
def new_teardown_suite(self):
return self._setup_teardown('suite TearDown')
def new_setup_case(self):
return self._setup_teardown('case Setup')
def new_teardown_case(self):
return self._setup_teardown('case TearDown')
| [
"seoktaehyeon@msn.com"
] | seoktaehyeon@msn.com |
7d9b8ff5c86e7a469c4e54991a98f844dbd57066 | e4cab6feadcee618f092f23020a157c8ded42ffc | /Basics/Matrix/homework.py | 524046cb9ab52c37bb822c2aedc925bed9786d01 | [] | no_license | Larionov0/Group3_Lessons | 7c314898a70c61aa445db37383076e211692b56b | 628bc7efe6817d107cb39d3017cb7cee44b86ba4 | refs/heads/master | 2023-08-22T07:14:44.595963 | 2021-10-17T11:48:06 | 2021-10-17T11:48:06 | 339,141,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
]
while True:
i = int(input('enter number from 0 to 4: i = '))
j = int(input('enter number from 0 to 4: j = '))
print(matrix[i][j])
| [
"larionov1001@gmail.com"
] | larionov1001@gmail.com |
e399cabcd4f8df0189068155cfc0ec319f106225 | 0a5e2a761508d0743f834c711cc8a43c1f07d0cb | /lambda.py | 54a6acd37a290199b3c9077478c15d69b5909a11 | [
"MIT"
] | permissive | yegorski/terraform-aws-api-gateway-cognito | 682286e1bb8324f081bd6cbbedfb4538bf3434df | 1ea74bc051d7e36039a66afd44fff42e9cf87c37 | refs/heads/master | 2022-07-31T01:43:07.821956 | 2020-05-17T02:47:02 | 2020-05-17T02:47:02 | 199,242,254 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import json
def lambda_handler(event, context):
return {
'statusCode': 200,
'body': json.dumps('Hello World!')
}
| [
"yegor.ius@enigma.com"
] | yegor.ius@enigma.com |
84820ad05b69152987c822b982840cd94202f499 | ce086e50a671567a865eb93a221dda91567b8c5d | /planlos/polls/admin.py | 7b3c1e560103a509db06118debd31a7e29171511 | [] | no_license | planlos/planlos-django | a8d1481717ff62551adfda89c2523a0416510ba6 | 85620fbe3ff4e62531ec444690f62097fd3b1ffb | refs/heads/master | 2016-09-06T01:31:01.529625 | 2015-04-15T20:05:41 | 2015-04-15T20:05:41 | 3,831,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from models import Poll, Choice, Hashtable
from django.contrib import admin
admin.site.register(Poll)
admin.site.register(Choice)
admin.site.register(Hashtable)
| [
"termine@planlosbremen.de"
] | termine@planlosbremen.de |
dcd36a6ddd565d15ba0b4d9ce5a4c21b4312262f | c2c21b0b5ad8f2b540cd8cddbee7660e77a557b2 | /While loop in python/break_in_while.py | 0dbfe250de8304c17a67faf65087f37b61eef7da | [] | no_license | umerfarooq01/Python-Full-Course-Code-From-Zero-To-Hero | 9c91d4f1a3ce1e6c8c0abe44b055627568feddf1 | 3fd06d031e5ae506c2ed0ba6c758a65d2f5f3334 | refs/heads/master | 2021-05-20T18:58:20.799674 | 2020-04-02T07:08:55 | 2020-04-02T07:08:55 | 252,380,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # In this programe we learn about breack statement in while loop, we use this statement
# when we want to break our loop on a specfic value or resut.
import random
i=1
while i<=10:
newR=random.randint(0,20)
print(newR)
if newR==6:
print("Break")
break
| [
"umerfarooq4352@gmail.com"
] | umerfarooq4352@gmail.com |
ab90728d6722c552d2f58b587ff5ee17a0c431e6 | 9d2f097e0fcd753e2d4aa3beaa707b53bc319654 | /sec3-basic/lesson11.py | 6894b5ffe8b8b388a1815baa0afaff26bb32e9fe | [] | no_license | ei1125/python-udemy-lesson | 7834bdc490cca1dd7e37104c39ff42e35defa506 | 2f149ea7c7dea3a0677bd1ce9a1899f01df3fab5 | refs/heads/master | 2022-12-08T09:46:40.222827 | 2020-08-29T14:11:48 | 2020-08-29T14:11:48 | 283,484,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | print("I don't know")
print('I don\'t know')
print('say "I don\'t know"')
print("say \"I don't know\"")
print('hello.\nHow are you?')
print(r'C:\name\name')
print("####################")
print("""\
line1
line2
line3
line4\
""")
print("####################")
print('Hi.' * 3 + 'Mike.')
print('Py''thon')
prefix = 'Py'
print(prefix + 'thon')
s = ('aaaaaaaaaaaaaaaaa'
'bbbbbbbbbbbbbbbbb')
print(s) | [
"i.dont.like.monday1125@gmail.com"
] | i.dont.like.monday1125@gmail.com |
922afb74fdeb65bf3a731c7e2f814a52234e3f75 | 8fd07ea363ba4263bafe25d213c72cc9a93e2b3e | /devops/Day1_fork_thread/Thread/5.凑够一定数量才能继续执行.py | d73b1743ccd66793d4ab5dc684274cdd8d96cd03 | [] | no_license | ml758392/python_tedu | 82e12ae014f0fc81230386fab07f901510fc8837 | 9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7 | refs/heads/master | 2020-04-12T08:30:42.354663 | 2019-03-29T11:55:30 | 2019-03-29T11:55:30 | 162,386,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # -*-coding:utf-8-*-
import threading
import time
bar = threading.Barrier(6)
def run():
print('%s---start' % threading.current_thread().name)
time.sleep(1)
bar.wait()
print('%s---end' % threading.current_thread().name)
if __name__ == '__main__':
for i in range(5):
threading.Thread(target=run).start() | [
"yy.tedu.cn"
] | yy.tedu.cn |
45511d1dde906e9c081ce3587c8c7a18c584d89e | 342d26a4edfa424a0a442308f68b77275ac27bba | /dbs/dal/LogOperate.py | 6ef8e452c871804b183fec1cae773536fe1fc6f3 | [
"BSD-3-Clause"
] | permissive | icysun/opencanary_web | 02e95f0708b49a2cb080b71dcd9e9a436a1ad25c | 71156ef8d7f86e2bb380a69f584de9603bfba93a | refs/heads/master | 2023-03-09T14:31:16.457577 | 2021-02-18T11:01:23 | 2021-02-18T11:01:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,924 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Author: pirogue
Purpose: 日志表操作
Site: http://pirogue.org
Created: 2018-08-03 17:32:54
"""
from dbs.initdb import DBSession
from dbs.models.HoneypotLog import OpencanaryLog
from dbs.models.Whiteip import Whiteip
from sqlalchemy import desc, asc, extract, func, distinct
from sqlalchemy.exc import InvalidRequestError
class LogOp:
"""增删改查"""
def __init__(self):
self.session = DBSession
def insert(self, dst_host, dst_port, honeycred, local_time, hostname, password, path, skin,\
useragent, username, session, localversion, remoteversion, df, idid, inin, lenlen, mac, outout,\
prec, proto, res, syn, tos, ttl, urgp, window, logtype, node_id, src_host, src_port, white, \
repo, ntp_cmd, args, cmd, banner_id, data, function, vnc_client_response, vnc_password, \
vnc_server_challenge, inputs, domain, headers_call_id, headers_content_length,headers_cseq, \
headers_from, headers_to, headers_via, community_string, requests, urg, psh, fin, \
appname, cltintname, database, language, servername, domainname):
loginsert = OpencanaryLog(dst_host=dst_host, dst_port=dst_port, honeycred=honeycred, local_time=local_time,\
hostname=hostname, password=password, path=path, skin=skin, useragent=useragent, username=username,\
session=session, localversion=localversion, remoteversion=remoteversion, df=df, idid=idid, inin=inin,\
lenlen=lenlen, mac=mac, outout=outout, prec=prec, proto=proto, res=res, syn=syn, tos=tos, ttl=ttl,\
urgp=urgp, window=window, logtype=logtype, node_id=node_id, src_host=src_host, src_port=src_port, white=white,\
# 扩表后的新加入库字段
repo=repo, ntp_cmd=ntp_cmd, args=args, cmd=cmd, banner_id=banner_id, data=data, function=function, \
vnc_client_response=vnc_client_response, vnc_password=vnc_password, vnc_server_challenge=vnc_server_challenge, \
inputs=inputs, domain=domain, headers_call_id=headers_call_id, headers_content_length=headers_content_length, \
headers_cseq=headers_cseq, headers_from=headers_from, headers_to=headers_to, headers_via=headers_via, \
community_string=community_string, requests=requests, urg=urg, psh=psh, fin=fin, \
appname=appname, cltintname=cltintname, database=database, language=language, servername=servername, domainname=domainname)
if loginsert:
try:
self.session.add(loginsert)
self.session.commit()
return True
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
else:
return False
# 查询日志表攻击列表数据
def page_select_attack(self, page_index):
try:
page_size = 10
# num = 10*int(page) - 10
logselect = self.session.query(OpencanaryLog).filter(
OpencanaryLog.white == 2).order_by(
desc(OpencanaryLog.local_time),
OpencanaryLog.id).limit(page_size).offset(
(page_index - 1) * page_size)
return logselect
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
# 查询日志表白名单数据
def page_select_white(self, page_index):
try:
page_size = 10
# num = 10*int(page) - 10
logselect = self.session.query(OpencanaryLog).filter(
OpencanaryLog.white == 1).order_by(
desc(OpencanaryLog.local_time),
OpencanaryLog.id).limit(page_size).offset(
(page_index - 1) * page_size)
return logselect
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
# 查询当年每月攻击数量
def attack_select_num(self, months):
try:
attack_num = self.session.query(
extract('month', OpencanaryLog.local_time).label('month'),
func.count('*').label('count')).filter(
extract('year', OpencanaryLog.local_time) == months,
OpencanaryLog.white == 2).group_by('month').all()
return attack_num
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
# 查询当年每月白名单内攻击数量
def white_select_num(self, months):
try:
white_num = self.session.query(
extract('month', OpencanaryLog.local_time).label('month'),
func.count('*').label('count')).filter(
extract('year', OpencanaryLog.local_time) == months,
OpencanaryLog.white == 1).group_by('month').all()
return white_num
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
# 查询各类攻击数量
def pie_select_num(self, years):
try:
pie_num = self.session.query(
func.count(OpencanaryLog.logtype),
OpencanaryLog.logtype).group_by(OpencanaryLog.logtype).filter(
extract('year', OpencanaryLog.local_time) == years,
OpencanaryLog.white == 2).all()
return pie_num
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
# 查询攻击数据总量
def select_attack_total(self):
try:
total_attack = self.session.query(
func.count(OpencanaryLog.id)).filter(
OpencanaryLog.white == 2).scalar()
return total_attack
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
# 查询过滤列表总量
def select_filter_total(self):
try:
total_filter = self.session.query(
func.count(OpencanaryLog.id)).filter(
OpencanaryLog.white == 1).scalar()
return total_filter
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close() | [
"p1r06u3@gmail.com"
] | p1r06u3@gmail.com |
8365695a37b1717abeeb9271dabd58743f2349c9 | 414393a5048e5212223051d6a5541ecb873bcc53 | /cifar100_Resnet/main_half_clean_B3_20180911.py | 147079ab5dc22ce22a180aaa401fa66475b53c22 | [] | no_license | byh1321/CIFAR100_Distorted_Channel_Selective | 5a0fc1107ab9d60ce12504a8e474144762eda8df | 897f2dea4e645329dfc3bf3df6b147c783bfa83f | refs/heads/master | 2020-03-21T02:31:24.024771 | 2019-08-12T05:59:53 | 2019-08-12T05:59:53 | 138,002,631 | 0 | 0 | null | 2019-08-02T02:26:49 | 2018-06-20T08:26:51 | Python | UTF-8 | Python | false | false | 34,201 | py | """
some parts of code are extracted from "https://github.com/kuangliu/pytorch-cifar"
I modified some parts for our experiment
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
from utils import progress_bar
import os
import argparse
#import VGG16
#import Resnet_vision as RS
import Resnet34 as RS2
import Resnet18 as RS
import cifar_dirty_test
import cifar_dirty_train
import struct
import random
# Command-line interface for this training / inference script.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--se', default=0, type=int, help='start epoch')
parser.add_argument('--ne', default=0, type=int, help='number of epoch')
parser.add_argument('--bs', default=128, type=int, help='batch size')
parser.add_argument('--mode', default=1, type=int, help='train or inference') #mode=1 is train, mode=0 is inference
parser.add_argument('--fixed', type=int, default=0, metavar='N',help='fixed=0 - floating point arithmetic')
parser.add_argument('--network', default='NULL', help='input network ckpt name', metavar="FILE")
parser.add_argument('--outputfile', default='garbage.txt', help='output file name', metavar="FILE")
args = parser.parse_args()
# NOTE(review): quant()/roundmax() below read args.aprec/args.iwidth, which are
# NOT declared here — running with --fixed nonzero will raise AttributeError;
# confirm whether those flags were dropped by mistake.
use_cuda = torch.cuda.is_available()  # run on GPU whenever one is visible
best_acc = 0  # best test accuracy seen so far (updated by test())
# Training preprocessing: standard CIFAR augmentation (random crop + flip)
# followed by normalization with the ImageNet channel statistics.
transform_train = transforms.Compose([transforms.RandomCrop(32,padding=4),
			transforms.RandomHorizontalFlip(),
			transforms.ToTensor(),
			transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
# Evaluation preprocessing: normalization only.
transform_test = transforms.Compose([transforms.ToTensor(),
			transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
# Clean CIFAR-100 plus many pre-distorted variants (gaussian noise and/or
# blur at several strengths) whose sample lists live in per-distortion CSVs.
cifar_train = dset.CIFAR100("./", train=True, transform=transform_train, target_transform=None, download=True)
cifar_test = dset.CIFAR100("./", train=False, transform=transform_test, target_transform=None, download=True)
cifar_test_gaussian_025 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_0.0_test_targets.csv")
cifar_test_gaussian_016 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.0_test_targets.csv")
cifar_test_gaussian_008 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.0_test_targets.csv")
cifar_train_gaussian_025 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_0.0_train_targets.csv")
cifar_train_gaussian_016 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.0_train_targets.csv")
cifar_train_gaussian_008 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.0_train_targets.csv")
cifar_test_blur_10 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_1.0_test_targets.csv")
cifar_test_blur_09 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.9_test_targets.csv")
cifar_test_blur_08 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/A2S/cifar100_VGG16/cifar100_gaussian_0.0_blur_0.8_test_targets.csv")
cifar_test_blur_0675 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.675_test_targets.csv")
cifar_test_blur_06 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.6_test_targets.csv")
cifar_test_blur_05 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.5_test_targets.csv")
cifar_test_blur_045 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.45_test_targets.csv")
cifar_test_blur_04 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.4_test_targets.csv")
cifar_test_blur_03 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.3_test_targets.csv")
cifar_test_blur_066 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.66_test_targets.csv")
cifar_test_blur_033 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.33_test_targets.csv")
cifar_train_blur_10 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_1.0_train_targets.csv")
cifar_train_blur_09 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.9_train_targets.csv")
cifar_train_blur_08 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/A2S/cifar100_VGG16/cifar100_gaussian_0.0_blur_0.8_train_targets.csv")
cifar_train_blur_0675 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.675_train_targets.csv")
cifar_train_blur_06 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.6_train_targets.csv")
cifar_train_blur_05 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.5_train_targets.csv")
cifar_train_blur_045 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.45_train_targets.csv")
cifar_train_blur_04 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.4_train_targets.csv")
cifar_train_blur_03 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.3_train_targets.csv")
cifar_train_blur_066 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.66_train_targets.csv")
cifar_train_blur_033 = cifar_dirty_train.CIFAR100DIRTY_TRAIN("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_0.33_train_targets.csv")
# NOTE(review): the next two lines OVERWRITE the cifar_train_gaussian_025 /
# cifar_train_blur_10 datasets defined above with CIFAR100DIRTY_TEST-based
# versions reading the *train* CSVs — confirm this shadowing is intentional.
cifar_train_gaussian_025 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_0.0_train_targets.csv")
cifar_train_blur_10 = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.0_blur_1.0_train_targets.csv")
cifar_train_gaussian_008_blur_03_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.3_train_targets.csv")
cifar_train_gaussian_016_blur_06_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.6_train_targets.csv")
cifar_train_gaussian_008_blur_033_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.08_blur_0.33_train_targets.csv")
cifar_train_gaussian_016_blur_066_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.16_blur_0.66_train_targets.csv")
cifar_train_gaussian_016_blur_08_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/A2S/cifar100_VGG16/cifar100_gaussian_0.16_blur_0.8_train_targets.csv")
cifar_train_gaussian_025_blur_10_mixed = cifar_dirty_test.CIFAR100DIRTY_TEST("/home/yhbyun/180614_cifar_VGG16/cifar100_gaussian_0.25_blur_1.0_train_targets.csv")
# Training mixes clean data with blur-0.9 and the 0.08/0.33 mixed distortion;
# evaluation here runs on the blur-0.9 test set in a single 10000-image batch.
#train_loader = torch.utils.data.DataLoader(cifar_train,batch_size=args.bs, shuffle=True,num_workers=8,drop_last=False)
train_loader = torch.utils.data.DataLoader(torch.utils.data.ConcatDataset([cifar_train, cifar_train_blur_09, cifar_train_gaussian_008_blur_033_mixed]),batch_size=args.bs, shuffle=True,num_workers=8,drop_last=False)
test_loader = torch.utils.data.DataLoader(cifar_test_blur_09,batch_size=10000, shuffle=False,num_workers=8,drop_last=False)
class ResNet18(nn.Module):
	"""ResNet-18-style network for 100-class CIFAR, written with every
	sub-block as its own nn.Sequential so that individual conv layers can
	be addressed by name (set_mask / save_network / net_mask_mul rely on
	this exact attribute layout).

	forward() optionally wraps activations in quant()/roundmax() when
	args.fixed is set, emulating fixed-point arithmetic.

	NOTE(review): layer3_basic4, layer4_basic2 and layer4_basic4 contain a
	ReLU *inside* the Sequential (i.e. before the residual add), while the
	corresponding layer1/layer2 blocks do not — confirm this asymmetry is
	intentional; a standard ResNet applies ReLU only after the addition.
	"""
	def __init__(self):
		super(ResNet18,self).__init__()
		# Stem: 3x3 conv + BN + ReLU + stride-2 max-pool (32x32 -> 16x16).
		self.conv1 = nn.Sequential(
			nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
			nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1),
		)
		# ---- stage 1: two 64-channel residual blocks (no downsample) ----
		self.layer1_basic1 = nn.Sequential(
			nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer1_basic2 = nn.Sequential(
			nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		self.layer1_relu1 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		self.layer1_basic3 = nn.Sequential(
			nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer1_basic4 = nn.Sequential(
			nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		self.layer1_relu2 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		# ---- stage 2: 64 -> 128 channels, stride-2 downsample shortcut ----
		self.layer2_basic1 = nn.Sequential(
			nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
			nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer2_downsample = nn.Sequential(
			nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
			nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		self.layer2_basic2 = nn.Sequential(
			nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		self.layer2_relu1 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		self.layer2_basic3 = nn.Sequential(
			nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer2_basic4 = nn.Sequential(
			nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		self.layer2_relu2 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		# ---- stage 3: 128 -> 256 channels, stride-2 downsample shortcut ----
		self.layer3_basic1 = nn.Sequential(
			nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False),
			nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer3_downsample = nn.Sequential(
			nn.Conv2d(128, 256, kernel_size=1, stride=2, bias=False),
			nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		self.layer3_basic2 = nn.Sequential(
			nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		self.layer3_relu1 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		self.layer3_basic3 = nn.Sequential(
			nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		# NOTE(review): ReLU before the residual add — see class docstring.
		self.layer3_basic4 = nn.Sequential(
			nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer3_relu2 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		# ---- stage 4: 256 -> 512 channels, stride-2 downsample shortcut ----
		self.layer4_basic1 = nn.Sequential(
			nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False),
			nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer4_downsample = nn.Sequential(
			nn.Conv2d(256, 512, kernel_size=1, stride=2, bias=False),
			nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
		)
		# NOTE(review): ReLU before the residual add — see class docstring.
		self.layer4_basic2 = nn.Sequential(
			nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer4_relu1 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		self.layer4_basic3 = nn.Sequential(
			nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		# NOTE(review): ReLU before the residual add — see class docstring.
		self.layer4_basic4 = nn.Sequential(
			nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
			nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
			nn.ReLU(inplace=False),
		)
		self.layer4_relu2 = nn.Sequential(
			nn.ReLU(inplace=False),
		)
		# Classifier: 512-d pooled feature -> 100 logits (no bias).
		self.linear = nn.Sequential(
			nn.Linear(512, 100, bias=False)
		)
		self._initialize_weights()
	def forward(self,x):
		"""Forward pass returning 100-class logits.

		With args.fixed set, every intermediate activation (and each residual
		branch before the add) is quantised (quant) and range-clipped
		(roundmax); otherwise the fixed-point branches are skipped entirely.
		"""
		if args.fixed:
			x = quant(x)
			x = roundmax(x)
		out = x.clone()
		out = self.conv1(out)
		# ---- stage 1, block 1 ----
		residual = out
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer1_basic1(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer1_basic2(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer1_relu1(out)
		# ---- stage 1, block 2 ----
		residual = out
		out = self.layer1_basic3(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer1_basic4(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer1_relu2(out)
		# ---- stage 2, block 1 (1x1 stride-2 projection shortcut) ----
		residual = self.layer2_downsample(out)
		out = self.layer2_basic1(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer2_basic2(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer2_relu1(out)
		# ---- stage 2, block 2 ----
		residual = out
		out = self.layer2_basic3(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer2_basic4(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer2_relu2(out)
		# ---- stage 3, block 1 (projection shortcut) ----
		residual = self.layer3_downsample(out)
		out = self.layer3_basic1(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer3_basic2(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer3_relu1(out)
		# ---- stage 3, block 2 ----
		residual = out
		out = self.layer3_basic3(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer3_basic4(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer3_relu2(out)
		# ---- stage 4, block 1 (projection shortcut) ----
		residual = self.layer4_downsample(out)
		out = self.layer4_basic1(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer4_basic2(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer4_relu1(out)
		# ---- stage 4, block 2 ----
		residual = out
		out = self.layer4_basic3(out)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out = self.layer4_basic4(out)
		if args.fixed:
			residual = quant(residual)
			residual = roundmax(residual)
		if args.fixed:
			out = quant(out)
			out = roundmax(out)
		out += residual
		out = self.layer4_relu2(out)
		# Pool the final feature map down, flatten, and classify.
		out = F.avg_pool2d(out, 2)
		out = out.view(out.size(0), -1)
		#print(out.size())
		out = self.linear(out)
		return out
	def _initialize_weights(self):
		"""Kaiming-init all convs, unit-init BN scales, N(0, 0.01) linear."""
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				#print(m)
				nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
				#if m.bias is not None:
				#nn.init.constant_(m.bias, 0)
			elif isinstance(m, nn.BatchNorm2d):
				nn.init.constant_(m.weight, 1)
				#nn.init.constant_(m.bias, 0)
			elif isinstance(m, nn.Linear):
				#print(m)
				nn.init.normal_(m.weight, 0, 0.01)
				#nn.init.constant_(m.bias, 0)
def roundmax(input):
	"""Clip `input` into the signed fixed-point range using ReLU arithmetic.

	Equivalent to clamping every element into
	[-(2**args.iwidth), 2**args.iwidth - 1], expressed with two ReLUs so
	the operation stays differentiable in the same way as the original.

	NOTE(review): args.iwidth is not declared by this script's argparse —
	calling this (i.e. running with --fixed nonzero) raises AttributeError.
	"""
	maximum = 2 ** args.iwidth - 1
	minimum = -maximum - 1
	shifted = F.relu(input - minimum)          # clip below at `minimum`
	capped = F.relu((maximum - minimum) - shifted)  # clip above at `maximum`
	return maximum - capped
def quant(input):
	"""Round `input` to the nearest multiple of 2**(-args.aprec).

	This keeps `args.aprec` fractional bits, emulating fixed-point
	precision for the activations.

	NOTE(review): args.aprec is not declared by this script's argparse —
	calling this (i.e. running with --fixed nonzero) raises AttributeError.
	"""
	step = 2 ** (-args.aprec)
	return torch.round(input / step) * step
def set_mask(mask, block, val):
	"""Write `val` into the channel-selective region of every weight mask.

	`mask` is a list of 21 tensors shaped like the ResNet18 weights:
	index 0 is conv1, 1-16 are the sixteen basic-block convs (in
	layer1..layer4 order), 17-19 are the three downsample convs, and 20 is
	the final linear layer.

	block == 0 selects every weight.  Blocks 1-4 select progressively
	narrower channel slices: the first c1 = 63 - 8*block output channels of
	conv1, with the budget doubling (2c + 1) at each stage transition.
	This reproduces the original hand-written tables exactly:
	55/111/223/447, 47/95/191/383, 39/79/159/319 and 31/63/127/255.
	Any other `block` value leaves `mask` untouched.

	Returns `mask` (which is modified in place).
	"""
	if block == 0:
		# Whole network: every weight tensor is selected.
		for idx in range(20):
			mask[idx][:,:,:,:] = val
		mask[20][:,:] = val
	elif block in (1, 2, 3, 4):
		# Channel budget per stage: doubles (2c + 1) at each transition.
		c1 = 63 - 8 * block
		c2 = 2 * c1 + 1
		c3 = 2 * c2 + 1
		c4 = 2 * c3 + 1
		# (out_channels, in_channels) slice sizes for mask[0..19];
		# None means "all input channels" (conv1 consumes the RGB input).
		slices = [
			(c1, None),                                  # conv1
			(c1, c1), (c1, c1), (c1, c1), (c1, c1),      # layer1 convs
			(c2, c1), (c2, c2), (c2, c2), (c2, c2),      # layer2 convs
			(c3, c2), (c3, c3), (c3, c3), (c3, c3),      # layer3 convs
			(c4, c3), (c4, c4), (c4, c4), (c4, c4),      # layer4 convs
			(c2, c1), (c3, c2), (c4, c3),                # downsample convs
		]
		for idx, (out_ch, in_ch) in enumerate(slices):
			if in_ch is None:
				mask[idx][0:out_ch,:,:,:] = val
			else:
				mask[idx][0:out_ch,0:in_ch,:,:] = val
		mask[20][:,0:c4] = val                           # classifier
	return mask
def save_network(layer):
	"""Copy the weight tensor of each learnable layer of `net2` into `layer`.

	`layer` is a 21-slot list indexed in the same order set_mask uses
	(0: conv1, 1-16: basic-block convs, 17-19: downsample convs, 20: linear).
	Returns `layer` with every slot replaced by the corresponding
	`param.data` of the (DataParallel-wrapped) global `net2`.
	"""
	module_names = (
		'conv1',
		'layer1_basic1', 'layer1_basic2', 'layer1_basic3', 'layer1_basic4',
		'layer2_basic1', 'layer2_basic2', 'layer2_basic3', 'layer2_basic4',
		'layer3_basic1', 'layer3_basic2', 'layer3_basic3', 'layer3_basic4',
		'layer4_basic1', 'layer4_basic2', 'layer4_basic3', 'layer4_basic4',
		'layer2_downsample', 'layer3_downsample', 'layer4_downsample',
		'linear',
	)
	for idx, name in enumerate(module_names):
		for child in net2.children():
			for param in getattr(child, name)[0].parameters():
				layer[idx] = param.data
	return layer
def add_network():
	"""Add the frozen weights of `net2` onto the weights of `net`, in place.

	'mask_null.dat' is loaded only as a correctly-shaped 21-slot container;
	save_network() then overwrites every slot with net2's current weight
	tensors, which are added element-wise onto the matching parameters of
	the global `net`.  Returns the list of added tensors.
	"""
	layer = torch.load('mask_null.dat')
	layer = save_network(layer)
	module_names = (
		'conv1',
		'layer1_basic1', 'layer1_basic2', 'layer1_basic3', 'layer1_basic4',
		'layer2_basic1', 'layer2_basic2', 'layer2_basic3', 'layer2_basic4',
		'layer3_basic1', 'layer3_basic2', 'layer3_basic3', 'layer3_basic4',
		'layer4_basic1', 'layer4_basic2', 'layer4_basic3', 'layer4_basic4',
		'layer2_downsample', 'layer3_downsample', 'layer4_downsample',
		'linear',
	)
	for idx, name in enumerate(module_names):
		for child in net.children():
			for param in getattr(child, name)[0].parameters():
				param.data = torch.add(param.data, layer[idx])
	return layer
def net_mask_mul(mask):
	"""Multiply each weight tensor of the global `net` by its mask, in place.

	`mask` is the 21-slot list produced by set_mask (same index order as
	save_network).  Each mask tensor is moved to the GPU before the
	element-wise multiply, zeroing the de-selected channels of `net`.
	"""
	module_names = (
		'conv1',
		'layer1_basic1', 'layer1_basic2', 'layer1_basic3', 'layer1_basic4',
		'layer2_basic1', 'layer2_basic2', 'layer2_basic3', 'layer2_basic4',
		'layer3_basic1', 'layer3_basic2', 'layer3_basic3', 'layer3_basic4',
		'layer4_basic1', 'layer4_basic2', 'layer4_basic3', 'layer4_basic4',
		'layer2_downsample', 'layer3_downsample', 'layer4_downsample',
		'linear',
	)
	for idx, name in enumerate(module_names):
		for child in net.children():
			for param in getattr(child, name)[0].parameters():
				param.data = torch.mul(param.data, mask[idx].cuda())
# Model
# mode 0: pure inference from the stage-B3 checkpoint.
if args.mode == 0:
	print('==> Resuming from checkpoint..')
	assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
	checkpoint = torch.load('./checkpoint/ckpt_20180911_half_clean_B3.t0')
	net = checkpoint['net']
# mode 1: training.  `net` is the stage-B3 network being trained; `net2`
# holds the stage-B2 network whose weights train() re-adds after every
# masked step (see net_mask_mul/add_network).
elif args.mode == 1:
	checkpoint = torch.load('./checkpoint/ckpt_20180911_half_clean_B3.t0')
	ckpt = torch.load('./checkpoint/ckpt_20180911_half_clean_B2.t0')
	net = checkpoint['net']
	net2 = ckpt['net']
	if args.resume:
		print('==> Resuming from checkpoint..')
		best_acc = checkpoint['acc']
	else:
		best_acc = 0
if use_cuda:
	net.cuda()
	# NOTE(review): `net` uses a hard-coded device_ids=range(0,8) while
	# `net2` uses torch.cuda.device_count() — this fails on machines with
	# fewer than 8 GPUs; confirm whether both should use device_count().
	net = torch.nn.DataParallel(net, device_ids=range(0,8))
	if args.mode > 0:
		net2.cuda()
		net2 = torch.nn.DataParallel(net2, device_ids=range(torch.cuda.device_count()))
	cudnn.benchmark = True
# (Removed: a 65-line commented-out triple-quoted block that printed the
# parameter sizes of every submodule of `net` — dead debug code.)
criterion = nn.CrossEntropyLoss()
# Plain SGD over all parameters; train()'s weight masking decides which
# channels effectively keep their updates.
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
start_epoch = args.se  # epoch numbering starts here (useful when resuming)
num_epoch = args.ne    # number of epochs to run in training mode
# Training
def train(epoch):
	"""Run one channel-masked training epoch over `train_loader`.

	After every backward pass the weights themselves (not the gradients)
	are multiplied by a mask that zeroes the block-2 channel slice, and the
	frozen stage-B2 weights from `net2` are added back via add_network()
	before optimizer.step().
	NOTE(review): confirm this weight-level mask+re-add ordering matches
	the intended channel-freezing scheme.
	NOTE: Variable and `loss.data[0]` are PyTorch 0.3-era APIs.
	"""
	print('\nEpoch: %d' % epoch)
	net.train()
	train_loss = 0
	correct = 0
	total = 0
	# Start from an all-ones mask (block 0), then zero the block-2 slice.
	mask_channel = torch.load('mask_null.dat')
	mask_channel = set_mask(set_mask(mask_channel, 0, 1), 2, 0)
	for batch_idx, (inputs, targets) in enumerate(train_loader):
		if use_cuda:
			inputs, targets = inputs.cuda(), targets.cuda()
		optimizer.zero_grad()
		inputs, targets = Variable(inputs), Variable(targets)
		outputs = net(inputs)
		loss = criterion(outputs, targets)
		loss.backward()
		net_mask_mul(mask_channel)
		'''
		for child in net.children():
			for param in child.conv1[0].parameters():
				for i in range(param.size()[0]):
					for j in range(param.size()[1]):
						print(param[i,j])
		print("======================================================")
		add_network()
		for child in net.children():
			for param in child.conv1[0].parameters():
				for i in range(param.size()[0]):
					for j in range(param.size()[1]):
						print(param[i,j])
		exit()
		'''
		add_network()
		optimizer.step()
		train_loss += loss.data[0]
		_, predicted = torch.max(outputs.data, 1)
		total += targets.size(0)
		correct += predicted.eq(targets.data).cpu().sum().item()
		progress_bar(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
			% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test():
	"""Evaluate `net` on `test_loader` and return accuracy (percent).

	When the accuracy improves on the global `best_acc` and the script is
	not in pure-inference mode (args.mode != 0), the unwrapped module and
	its accuracy are saved to ./checkpoint/ckpt_20180911_half_clean_B3.t0.
	NOTE: `volatile=True` and `loss.data[0]` are PyTorch 0.3-era APIs.
	"""
	global best_acc
	net.eval()
	test_loss = 0
	correct = 0
	total = 0
	for batch_idx, (inputs, targets) in enumerate(test_loader):
		if use_cuda:
			inputs, targets = inputs.cuda(), targets.cuda()
		inputs, targets = Variable(inputs, volatile=True), Variable(targets)
		outputs = net(inputs)
		loss = criterion(outputs, targets)
		test_loss += loss.data[0]
		_, predicted = torch.max(outputs.data, 1)
		total += targets.size(0)
		correct += predicted.eq(targets.data).cpu().sum().item()
		progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
			% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
	# Save checkpoint.
	acc = 100.*correct/total
	if acc > best_acc:
		if args.mode == 0:
			pass
		else:
			print('Saving..')
			state = {
				'net': net.module if use_cuda else net,
				'acc': acc,
			}
			if not os.path.isdir('checkpoint'):
				os.mkdir('checkpoint')
			torch.save(state, './checkpoint/ckpt_20180911_half_clean_B3.t0')
			best_acc = acc
	return acc
# Train+inference vs. Inference
# Entry point: mode 1 trains for num_epoch epochs, evaluating after each;
# mode 0 runs a single evaluation pass; any other mode is a no-op.
mode = args.mode
if mode == 1: # mode=1 is training & inference @ each epoch
	for epoch in range(start_epoch, start_epoch+num_epoch):
		train(epoch)
		test()
elif mode == 0: # only inference
	test()
else:
	pass
| [
"byh1321@naver.com"
] | byh1321@naver.com |
e1ec5c9508ce8facf6f66eecd10a5f6cd4e7b0a8 | 7fa5795af4ea431f923d4de851512e42d18b1207 | /software_package/in4073/dfu_serial/package.py | 52b11ac1a472c094942689f75c79b972b2928066 | [] | no_license | NielsHokke/DroneController | cb7d9fbf009e12ff3fd16d720e0a41bbb3f3ae88 | 11bec3b36987e03c581fc6e9fb63c2d121285c9c | refs/heads/master | 2020-03-14T07:36:20.619926 | 2018-06-26T21:55:11 | 2018-06-26T21:55:11 | 131,508,019 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,481 | py | # Copyright (c) 2015, Nordic Semiconductor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python standard library
import os
import tempfile
import shutil
# 3rd party libraries
from zipfile import ZipFile
import hashlib
# Nordic libraries
#from nordicsemi.exceptions import NordicSemiException
#from nordicsemi.dfu.nrfhex import *
#from nordicsemi.dfu.init_packet import *
#from nordicsemi.dfu.manifest import ManifestGenerator, Manifest
#from nordicsemi.dfu.model import HexType, FirmwareKeys
#from nordicsemi.dfu.crc16 import *
#from signing import Signing
class Package(object):
    """
    Packages and unpacks Nordic DFU packages. Nordic DFU packages are zip files that contains firmware and meta-information
    necessary for utilities to perform a DFU on nRF5X devices.
    The internal data model used in Package is a dictionary. The dictionary is expressed like this in
    json format:
    {
        "manifest": {
            "bootloader": {
                "bin_file": "asdf.bin",
                "dat_file": "asdf.dat",
                "init_packet_data": {
                    "application_version": null,
                    "device_revision": null,
                    "device_type": 5,
                    "firmware_hash": "asdfasdkfjhasdkfjashfkjasfhaskjfhkjsdfhasjkhf",
                    "softdevice_req": [
                        17,
                        18
                    ]
                }
            }
        }
    }
    Attributes application, bootloader, softdevice, softdevice_bootloader shall not be put into the manifest if they are null

    NOTE(review): the nordicsemi/signing imports at the top of this module are
    commented out, so names such as PacketField, HexType, FirmwareKeys, nRFHex,
    Packet, Signing, ManifestGenerator, Manifest, NordicSemiException and
    calc_crc16 must be provided elsewhere or these methods raise NameError at
    runtime -- confirm how this copy is wired up.
    """
    # Default init-packet field values: 0xFFFF / 0xFFFFFFFF / [0xFFFE] act as
    # "don't care"/wildcard markers in the Nordic DFU init packet.
    DEFAULT_DEV_TYPE = 0xFFFF
    DEFAULT_DEV_REV = 0xFFFF
    DEFAULT_APP_VERSION = 0xFFFFFFFF
    DEFAULT_SD_REQ = [0xFFFE]
    DEFAULT_DFU_VER = 0.5
    MANIFEST_FILENAME = "manifest.json"
    def __init__(self,
                 dev_type=DEFAULT_DEV_TYPE,
                 dev_rev=DEFAULT_DEV_REV,
                 app_version=DEFAULT_APP_VERSION,
                 sd_req=DEFAULT_SD_REQ,
                 app_fw=None,
                 bootloader_fw=None,
                 softdevice_fw=None,
                 dfu_ver=DEFAULT_DFU_VER,
                 key_file=None):
        """
        Constructor that requires values used for generating a Nordic DFU package.
        :param int dev_type: Device type init-packet field
        :param int dev_rev: Device revision init-packet field
        :param int app_version: App version init-packet field
        :param list sd_req: Softdevice Requirement init-packet field
        :param str app_fw: Path to application firmware file
        :param str bootloader_fw: Path to bootloader firmware file
        :param str softdevice_fw: Path to softdevice firmware file
        :param float dfu_ver: DFU version to use when generating init-packet
        :param str key_file: Path to Signing key file (PEM)
        :return: None
        """
        self.dfu_ver = dfu_ver
        # Collect only the init-packet fields that were actually supplied.
        init_packet_vars = {}
        if dev_type is not None:
            init_packet_vars[PacketField.DEVICE_TYPE] = dev_type
        if dev_rev is not None:
            init_packet_vars[PacketField.DEVICE_REVISION] = dev_rev
        if app_version is not None:
            init_packet_vars[PacketField.APP_VERSION] = app_version
        if sd_req is not None:
            init_packet_vars[PacketField.REQUIRED_SOFTDEVICES_ARRAY] = sd_req
        # One entry per firmware image type; each gets its own copy of the
        # init-packet fields (see __add_firmware_info).
        self.firmwares_data = {}
        if app_fw:
            self.__add_firmware_info(HexType.APPLICATION,
                                     app_fw,
                                     init_packet_vars)
        if bootloader_fw:
            self.__add_firmware_info(HexType.BOOTLOADER,
                                     bootloader_fw,
                                     init_packet_vars)
        if softdevice_fw:
            self.__add_firmware_info(HexType.SOFTDEVICE,
                                     softdevice_fw,
                                     init_packet_vars)
        # Supplying a signing key forces the signed (ECDS) packet format 0.8.
        # NOTE(review): self.key_file is only bound in this branch; calling
        # generate_package with dfu_ver=0.8 but key_file=None would raise
        # AttributeError at signer.load_key(self.key_file) -- confirm intended.
        if key_file:
            self.dfu_ver = 0.8
            self.key_file = key_file
    def generate_package(self, filename, preserve_work_directory=False):
        """
        Generates a Nordic DFU package. The package is a zip file containing firmware(s) and metadata required
        for Nordic DFU applications to perform DFU onn nRF5X devices.
        :param str filename: Filename for generated package.
        :param bool preserve_work_directory: True to preserve the temporary working directory.
        Useful for debugging of a package, and if the user wants to look at the generated package without having to
        unzip it.
        :return: None
        """
        work_directory = self.__create_temp_workspace()
        if Package._is_bootloader_softdevice_combination(self.firmwares_data):
            # Removing softdevice and bootloader data from dictionary and adding the combined later
            softdevice_fw_data = self.firmwares_data.pop(HexType.SOFTDEVICE)
            bootloader_fw_data = self.firmwares_data.pop(HexType.BOOTLOADER)
            softdevice_fw_name = softdevice_fw_data[FirmwareKeys.FIRMWARE_FILENAME]
            bootloader_fw_name = bootloader_fw_data[FirmwareKeys.FIRMWARE_FILENAME]
            new_filename = "sd_bl.bin"
            sd_bl_file_path = os.path.join(work_directory, new_filename)
            # Merge the two hex images into a single combined binary.
            nrf_hex = nRFHex(softdevice_fw_name, bootloader_fw_name)
            nrf_hex.tobinfile(sd_bl_file_path)
            softdevice_size = nrf_hex.size()
            bootloader_size = nrf_hex.bootloadersize()
            self.__add_firmware_info(HexType.SD_BL,
                                     sd_bl_file_path,
                                     softdevice_fw_data[FirmwareKeys.INIT_PACKET_DATA],
                                     softdevice_size,
                                     bootloader_size)
        for key in self.firmwares_data:
            firmware = self.firmwares_data[key]
            # Normalize the firmware file and store it in the work directory
            firmware[FirmwareKeys.BIN_FILENAME] = \
                Package.normalize_firmware_to_bin(work_directory, firmware[FirmwareKeys.FIRMWARE_FILENAME])
            # Calculate the hash for the .bin file located in the work directory
            bin_file_path = os.path.join(work_directory, firmware[FirmwareKeys.BIN_FILENAME])
            init_packet_data = firmware[FirmwareKeys.INIT_PACKET_DATA]
            # Integrity scheme depends on the DFU packet version:
            # <=0.5 CRC16 only, 0.6 CRC16 + ext id, 0.7 SHA-256, 0.8 SHA-256 + ECDS signature.
            if self.dfu_ver <= 0.5:
                firmware_hash = Package.calculate_crc16(bin_file_path)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16] = firmware_hash
            elif self.dfu_ver == 0.6:
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID] = INIT_PACKET_USES_CRC16
                firmware_hash = Package.calculate_crc16(bin_file_path)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16] = firmware_hash
            elif self.dfu_ver == 0.7:
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID] = INIT_PACKET_USES_HASH
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH] = int(Package.calculate_file_size(bin_file_path))
                firmware_hash = Package.calculate_sha256_hash(bin_file_path)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH] = firmware_hash
            elif self.dfu_ver == 0.8:
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID] = INIT_PACKET_EXT_USES_ECDS
                firmware_hash = Package.calculate_sha256_hash(bin_file_path)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH] = int(Package.calculate_file_size(bin_file_path))
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH] = firmware_hash
                # Sign a first rendering of the init packet, then embed the
                # signature and render the final packet below.
                temp_packet = self._create_init_packet(firmware)
                signer = Signing()
                signer.load_key(self.key_file)
                signature = signer.sign(temp_packet)
                init_packet_data[PacketField.NORDIC_PROPRIETARY_OPT_DATA_INIT_PACKET_ECDS] = signature
            # Store the .dat file in the work directory
            init_packet = self._create_init_packet(firmware)
            init_packet_filename = firmware[FirmwareKeys.BIN_FILENAME].replace(".bin", ".dat")
            with open(os.path.join(work_directory, init_packet_filename), 'wb') as init_packet_file:
                init_packet_file.write(init_packet)
            firmware[FirmwareKeys.DAT_FILENAME] = \
                init_packet_filename
        # Store the manifest to manifest.json
        manifest = self.create_manifest()
        with open(os.path.join(work_directory, Package.MANIFEST_FILENAME), "w") as manifest_file:
            manifest_file.write(manifest)
        # Package the work_directory to a zip file
        Package.create_zip_package(work_directory, filename)
        # Delete the temporary directory
        if not preserve_work_directory:
            shutil.rmtree(work_directory)
    @staticmethod
    def __create_temp_workspace():
        # Fresh temp dir per package; removed in generate_package unless preserved.
        return tempfile.mkdtemp(prefix="nrf_dfu_")
    @staticmethod
    def create_zip_package(work_directory, filename):
        """Zip every file in work_directory (flat, no subdirs) into filename."""
        files = os.listdir(work_directory)
        with ZipFile(filename, 'w') as package:
            for _file in files:
                file_path = os.path.join(work_directory, _file)
                package.write(file_path, _file)
    @staticmethod
    def calculate_file_size(firmware_filename):
        """Return the file size in bytes."""
        b = os.path.getsize(firmware_filename)
        return b
    @staticmethod
    def calculate_sha256_hash(firmware_filename):
        """Return the SHA-256 digest (raw bytes) of the file, read in 4 KiB chunks."""
        read_buffer = 4096
        digest = hashlib.sha256()
        with open(firmware_filename, 'rb') as firmware_file:
            while True:
                data = firmware_file.read(read_buffer)
                if data:
                    digest.update(data)
                else:
                    break
        return digest.digest()
    @staticmethod
    def calculate_crc16(firmware_filename):
        """
        Calculates CRC16 hash on provided firmware filename
        :type str firmware_filename:
        """
        # NOTE(review): this buffers the whole file in memory before hashing;
        # fine for firmware-sized inputs.
        data_buffer = b''
        read_size = 4096
        with open(firmware_filename, 'rb') as firmware_file:
            while True:
                data = firmware_file.read(read_size)
                if data:
                    data_buffer += data
                else:
                    break
        return calc_crc16(data_buffer, 0xffff)
    def create_manifest(self):
        """Render the manifest.json content for the current firmwares_data."""
        manifest = ManifestGenerator(self.dfu_ver, self.firmwares_data)
        return manifest.generate_manifest()
    @staticmethod
    def _is_bootloader_softdevice_combination(firmwares):
        # True when both images are present and must be merged into one sd_bl image.
        return (HexType.BOOTLOADER in firmwares) and (HexType.SOFTDEVICE in firmwares)
    def __add_firmware_info(self, firmware_type, filename, init_packet_data, sd_size=None, bl_size=None):
        """Register one firmware image; sd_size/bl_size only apply to SD_BL."""
        self.firmwares_data[firmware_type] = {
            FirmwareKeys.FIRMWARE_FILENAME: filename,
            FirmwareKeys.INIT_PACKET_DATA: init_packet_data.copy(),
            # Copying init packet to avoid using the same for all firmware
        }
        if firmware_type == HexType.SD_BL:
            self.firmwares_data[firmware_type][FirmwareKeys.SD_SIZE] = sd_size
            self.firmwares_data[firmware_type][FirmwareKeys.BL_SIZE] = bl_size
    @staticmethod
    def _create_init_packet(firmware_data):
        """Serialize the firmware's init-packet fields into the .dat payload."""
        p = Packet(firmware_data[FirmwareKeys.INIT_PACKET_DATA])
        return p.generate_packet()
    @staticmethod
    def normalize_firmware_to_bin(work_directory, firmware_path):
        """Convert a .hex firmware to .bin inside work_directory (no-op if it exists)."""
        firmware_filename = os.path.basename(firmware_path)
        new_filename = firmware_filename.replace(".hex", ".bin")
        new_filepath = os.path.join(work_directory, new_filename)
        if not os.path.exists(new_filepath):
            temp = nRFHex(firmware_path)
            temp.tobinfile(new_filepath)
        return new_filepath
    @staticmethod
    def unpack_package(package_path, target_dir):
        """
        Unpacks a Nordic DFU package.
        :param str package_path: Path to the package
        :param str target_dir: Target directory to unpack the package to
        :return: Manifest Manifest: Returns a manifest back to the user. The manifest is a parse datamodel
        of the manifest found in the Nordic DFU package.
        """
        if not os.path.isfile(package_path):
            raise NordicSemiException("Package {0} not found.".format(package_path))
        target_dir = os.path.abspath(target_dir)
        target_base_path = os.path.dirname(target_dir)
        if not os.path.exists(target_base_path):
            raise NordicSemiException("Base path to target directory {0} does not exist.".format(target_base_path))
        if not os.path.isdir(target_base_path):
            raise NordicSemiException("Base path to target directory {0} is not a directory.".format(target_base_path))
        # NOTE(review): unlike the raises above, this message is never
        # .format()-ed -- target_dir is passed as a second exception arg, so
        # the "{0}" placeholder appears literally in the message.
        if os.path.exists(target_dir):
            raise NordicSemiException(
                "Target directory {0} exists, not able to unpack to that directory.",
                target_dir)
        with ZipFile(package_path, 'r') as pkg:
            pkg.extractall(target_dir)
        with open(os.path.join(target_dir, Package.MANIFEST_FILENAME), 'r') as f:
            _json = f.read()
            """:type :str """
            return Manifest.from_json(_json)
| [
"NielsHokke@gmail.com"
] | NielsHokke@gmail.com |
d11ceddcb0305bf614396494c52ce1db3bac78a5 | 32544e36cf43e0ff41bdec6aab4a9c7e2d6506fd | /13_circle/circle.py | a3e4c88da30e6851a08f75148fbc1e6fdde7efe2 | [] | no_license | luispabreu/p3ws | 9374bfadcbda85b874c9dd441caf962fbcc8eea2 | e7ef18d36a222a2d4f2ef0b506a45a1d896f8273 | refs/heads/master | 2023-02-05T21:33:13.523410 | 2020-12-18T18:27:50 | 2020-12-18T18:27:50 | 319,834,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | import math
from point import Point
class Circle:
    """A circle defined by a center point ``c`` and a non-negative radius ``r``."""
    def __init__(self, c=None, r=1):
        """Create a circle.

        :param c: center ``Point``; defaults to the origin when ``None``
            (the ``None`` sentinel avoids a mutable default argument).
        :param r: radius; must be non-negative.
        """
        if c is None:
            self.c = Point(0, 0)
        else:
            self.c = c
        assert r >= 0
        self.r = r
    def __str__(self):
        """Return the circle as '((x, y), r)'."""
        return f'(({self.c.x}, {self.c.y}), {self.r})'
    def __repr__(self):
        """Debug representation: 'Circle((x, y), r)'."""
        return 'Circle' + self.__str__()
    def move(self, dx, dy):
        """Translate the circle's center by (dx, dy) in place."""
        self.c.move(dx, dy)
    def intersection_area(self, other_circle):
        """Return the area of overlap between this circle and *other_circle*.

        Cases: disjoint (or externally tangent) -> 0; one circle contained in
        the other -> area of the smaller disk; otherwise the standard
        lens-area formula for two intersecting circles.
        """
        distance = self.c.distance_from(other_circle.c)
        # Disjoint or externally tangent: no overlap at all.
        if distance >= (self.r + other_circle.r):
            return 0
        # One circle lies entirely inside the other: overlap is the smaller disk.
        if distance <= abs(self.r - other_circle.r):
            if self.r >= other_circle.r:
                return math.pi * (other_circle.r**2)
            else:
                return math.pi * (self.r**2)
        else:
            # Partial overlap: sum of the two circular-segment areas minus the
            # kite-shaped triangle term (circle-circle "lens" area formula).
            a = self.r**2 * math.acos((distance**2 + self.r**2 - other_circle.r**2) / (2 * distance * self.r))
            b = other_circle.r**2 * math.acos((distance**2 + other_circle.r**2 - self.r**2) / (2 * distance * other_circle.r))
            c = .5 * math.sqrt((- distance + self.r + other_circle.r)*(distance + self.r - other_circle.r)*(distance - self.r + other_circle.r)*(distance + self.r + other_circle.r))
            return a + b - c
| [
"lep43@duke.edu"
] | lep43@duke.edu |
2caf0ae0ff0fa1bc2def55b6b8820594bcd37f56 | 251aac7cebbac65f08e2c79fb40f76ff6a97f102 | /test_1/test_5.py | 14ce0929f0d94b960a8f91decb98c0fdb0bf64b3 | [] | no_license | XIAOXIzzz7/Pytest | a95eb936a265a846a5039a0dd1c181d0ab4ff405 | 2d70f4fbcd1017efec4a6f86a0467ba0c5576150 | refs/heads/master | 2023-03-29T04:46:58.452602 | 2021-03-30T09:27:43 | 2021-03-30T09:27:43 | 352,943,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from airtest.core.android.adb import ADB
# Single airtest ADB wrapper instance shared by the helper below.
adb = ADB()
def devices():
    """Return the device list reported by airtest's ADB wrapper (`adb devices`)."""
    return adb.devices()
# Script behavior: print the connected-device list when the module runs.
print(devices())
| [
"1783583986@qq.com"
] | 1783583986@qq.com |
d514000dc8136190feeb2ce9d3aa732983814687 | 51aeef84c9d4fbcf07a68dc1c1b1d06cd0eac696 | /test_client/steps/then.py | f495c584b18ce4a0fb63d046681780fef13b3225 | [] | no_license | kushalbavkar/python-automation | ade7e32840993ae3369f917dc6c2f2bcc9203ca6 | c3f66a41514dd7fd33eafe3b9efd689621cf9a3d | refs/heads/master | 2021-12-25T06:07:08.211125 | 2021-12-18T14:38:43 | 2021-12-18T14:38:43 | 253,262,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | import allure
from pytest_bdd import then
from test_client.actions.github import GitHubActions
from test_client.ui.webdriver import WebDriver
from test_client.fixtures.test_context import TestContext
@then('I should see the repository page')
@allure.step('I should see the repository page')
def i_should_see_the_repository_page(test_context: TestContext, web_driver: WebDriver):
    """BDD 'then' step: verify the open page is the expected user's repository."""
    actions = GitHubActions(web_driver)
    repo_author, repo_name = actions.get_project_details()
    assert test_context.user == repo_author, 'Invalid User'
    assert test_context.project == repo_name, 'Invalid Project'
| [
"bavkarkushal@gmail.com"
] | bavkarkushal@gmail.com |
70bcb8c2cc1754e8ada22e576440a4067ed57f0a | 129bbab6f150dab3878fb53310c6ce9d14e7bea7 | /python-src/chap7/mountain_poll.py | 33431739412f2ca5a1bdd156c5f4fb8cb631b37a | [] | no_license | icsoftwareproject/software-project-course | 53c367c81876019b1097cb749170473cebae637d | 2272fee5f3244c669f3ce9400e542b47bf4b8764 | refs/heads/master | 2023-08-04T20:29:41.093219 | 2023-08-02T09:10:43 | 2023-08-02T09:10:43 | 329,323,493 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | responses = {}
# Set a flag to indicate that polling is active.
polling_active = True
while polling_active:
    # Prompt for the person's name and response.
    name = input("\nWhat is your name? ")
    response = input("Which mountain would you like to climb someday? ")
    # Store the response in the dictionary.
    responses[name] = response
    # Find out if anyone else is going to take the poll.
    repeat = input("Would you like to let another person respond? (yes/ no) ")
    # Any answer other than the exact string 'no' keeps the poll running.
    if repeat == 'no':
        polling_active = False
# Polling is complete. Show the results.
print("\n--- Poll Results ---")
for name, response in responses.items():
    print(f"{name} would like to climb {response}.")
"baldoino@ic.ufal.br"
] | baldoino@ic.ufal.br |
59f0627ece60217800e3c91abd0f3269841b99de | a3354726b126b85987a1455bd4b1ed0a4d05f5bb | /apps/posts/templatetags/urlify.py | dbcef20ecbbcf46d92a98266c40fc00add8b6040 | [] | no_license | RonaldTheodoro/django-blog | cea90ab619e69560013a995c8d67d65e4593e0a9 | 92b64aa93c495fef835e64a98c9619cba3f518c4 | refs/heads/master | 2020-04-01T06:05:08.492523 | 2018-10-20T15:28:53 | 2018-10-20T15:28:53 | 152,932,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from urllib.parse import quote_plus
from django import template
register = template.Library() # filter/tag registry for this template-tag module
@register.filter
def urlify(value):
    """Django template filter: URL-encode *value* for a query string;
    spaces become '+' (delegates to urllib.parse.quote_plus)."""
    return quote_plus(value)
| [
"ronald.silva4@fatec.sp.gov.br"
] | ronald.silva4@fatec.sp.gov.br |
cce8f8c63a4a63f58241e9dcde14ceaa2d3b6b2f | be22e62625f2183ac6d79c4316b9f83753d737ef | /qichacha.py | 11b7c53a32b91a7e264648ba7c5375f5f6157c15 | [] | no_license | chenjingorz/Qichacha-web-scrap | 39e67984fcb331a0093c354bfcc294cff217249e | b5b150ea0b1af077f275b3005ec95a40d35ccce8 | refs/heads/master | 2020-03-24T20:26:51.018522 | 2018-08-02T08:00:35 | 2018-08-02T08:00:35 | 142,978,352 | 0 | 0 | null | 2018-08-01T01:59:16 | 2018-07-31T07:27:42 | Python | UTF-8 | Python | false | false | 6,703 | py | # names with 100% on qichacha are stored in SQL, else stored in csv 'unmatched entities' for manual search
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import pyodbc
from time import sleep
# import the excel file with entities
entity = pd.read_excel('C:\Work Data (Keppel ABC Audit)\PYTHON PROJECT\QICHACHA\Copy of entity.xlsx')
# NOTE(review): DB connection string and the login credentials further below
# are hard-coded in source; consider config/environment variables.
connection = pyodbc.connect('Driver={SQL Server Native Client 11.0};Server=SGKCLNB18030528;Database=Entity1;Trusted_Connection=yes;')
cursor = connection.cursor()
# ONLY TO CLEAR ALL ROWS IN THE TABLE
cursor.execute("truncate table MainMembers")
cursor.execute("truncate table StockHolders")
cursor.execute("truncate table EntityInfo")
connection.commit()
# run in BROWSER MODE if u want to see whats happening
driver = webdriver.Chrome('C:/Program Files/Java/chromedriver.exe')
driver.get('https://www.qichacha.com/')
# run in headless mode for faster execution, need to download google canary
#chrome_options = Options()
#chrome_options.add_argument("--headless")
#chrome_options.add_argument("--window-size=1920x1080")
#driver = webdriver.Chrome('C:/Program Files/Java/chromedriver.exe', chrome_options=chrome_options)
#driver.get('https://www.qichacha.com/')
# open with encoding so that when u write no need encoding
# (utf-8-sig writes a BOM so Excel opens the CSV with the right encoding)
f = open('Unmatched entities2.csv','w', encoding = 'utf-8-sig')
f.write("Unmatched Entities|Search1|Search2|Search3|\n")
# login first: open the login dialog, then choose QQ login
login = driver.find_element_by_xpath('/html/body/header/div/ul[2]/li[7]/a').click()
qq = driver.find_element_by_xpath('//*[@id="qrcodeLoginPanel"]/div[2]/div/div[3]/a[2]').click()
# switch to the different iframe embedded in the login page
driver.switch_to.frame(0)
qq = driver.find_element_by_xpath('//*[@id="switcher_plogin"]').click()
user = driver.find_element_by_xpath('//*[@id="u"]').send_keys('953726816')
pw = driver.find_element_by_xpath('//*[@id="p"]').send_keys('KepCorp@123')
login = driver.find_element_by_xpath('//*[@id="login_button"]').click()
# give the login redirect time to finish before searching
sleep(5)
# for i in range(entity.shape[0]):
# Main scrape loop: one search per company name in the 'BUTXT' column.
# NOTE(review): the range starts at 326 (a manual resume point), so the very
# first iteration takes the `i != 0` branch and reads len(key) before `key`
# has been assigned in this run -- NameError unless resumed in a live session;
# confirm intended resume behavior.
# NOTE(review): all SQL below is built by string concatenation from scraped
# text -- vulnerable to injection / broken quoting on names containing quotes;
# parameterized queries (cursor.execute(sql, params)) would be safer.
for i in range(326, entity.shape[0]):
    if i != 0:
        # Subsequent searches reuse the header search box: clear it one
        # backspace per character of the previous key, then type the new key.
        newSearch = driver.find_element_by_xpath('//*[@id="headerKey"]')
        for k in range(len(key)):
            newSearch.send_keys(Keys.BACKSPACE)
        key = entity.iloc[i]['BUTXT']
        newSearch.send_keys(key)
        select = driver.find_element_by_xpath('/html/body/header/div/form/div/div/span/button').click()
    else:
        # First search uses the landing-page search box.
        key = entity.iloc[0]['BUTXT']
        search = driver.find_element_by_xpath('//*[@id="searchkey"]').send_keys(key)
        search = driver.find_element_by_xpath('//*[@id="V3_Search_bt"]').click()
    try:
        # Result titles on the search page.
        name = driver.find_elements_by_class_name('ma_h1')
        if len(name) == 0:
            # No results at all: log the entity as unmatched.
            f.write(key + "\n")
        else:
            company = name[0].text
            if company == key:
                # Exact match: scrape the summary fields from the result card.
                companyInfo = driver.find_elements_by_class_name('m-t-xs')
                date = companyInfo[0].text.split('成立时间:')[1]
                email = companyInfo[1].text.split(' ')[0][3:]
                phone = companyInfo[1].text.split(' ')[1][3:]
                address = companyInfo[2].text[3:]
                command = "insert into EntityInfo(CompanyName, FoundedOn, Phone, Email, Address) values (N'" + key + "', '" + date + "', '" + phone + "' , '" + email + "', N'" + address + "')"
                cursor.execute(command)
                # Open the company detail page (new window) and switch to it.
                name[0].click()
                driver.switch_to.window(driver.window_handles[1])
                # legalRep
                try:
                    comInfo = driver.find_element_by_id("Cominfo")
                    legalRep = comInfo.find_element_by_class_name('bname').text
                    command = "update EntityInfo set LegalRep = N'" + legalRep + "' where CompanyName = N'" + key + "'"
                    cursor.execute(command)
                except NoSuchElementException:
                    # No legal-representative section: store a '-' placeholder.
                    command = "update EntityInfo set LegalRep = '-' where CompanyName = N'" + key + "'"
                    cursor.execute(command)
                # use assigned variableName.find instead of driver.find to find the element under that variable
                try:
                    mainMember = driver.find_element_by_id('Mainmember')
                    # name and position list
                    mainName = mainMember.find_elements_by_xpath(
                        './/a[@class = "c_a"]') # .// current processing starts at the current node
                    mainPosition = mainMember.find_elements_by_xpath('.//td[@class = "text-center"]')
                    memberNo = mainMember.find_element_by_xpath('//*[@id="Mainmember"]/div/span[1]').text
                    for j in range(int(memberNo)):
                        # store into SQL
                        command = "insert into MainMembers(CompanyName, MainMember, Position) values (N'" + key + "', N'" + \
                                  mainName[j].text + "', N'" + mainPosition[j].text + "')"
                        cursor.execute(command)
                except NoSuchElementException:
                    pass
                try:
                    stockInfo = driver.find_element_by_id('Sockinfo')
                    # stockholder list
                    stockHolders = stockInfo.find_elements_by_xpath('.//a[not(@class ="btn-touzi")]')
                    stockNo = stockInfo.find_element_by_xpath('//*[@id="Sockinfo"]/div/span[1]').text
                    for j in range(int(stockNo)):
                        command = "insert into StockHolders(CompanyName, StockHolder) values (N'" + key + "', N'" + \
                                  stockHolders[j + 1].text + "')"
                        cursor.execute(command)
                except NoSuchElementException:
                    pass
                connection.commit()
                # close current window and switch to original page
                driver.close()
                driver.switch_to.window(driver.window_handles[0])
            else:
                # Not a 100% match: record up to the first three candidates
                # in the CSV for manual review.
                # NOTE(review): the inner `for i in range(len(name))` reuses
                # the outer loop variable name `i` -- harmless to loop
                # progression but confusing; rename if touched.
                if (len(name) < 3):
                    write = key
                    for i in range(len(name)):
                        write += '|' + name[i].text
                    f.write(write + '\n')
                else:
                    f.write(key + '|' + name[0].text + '|' + name[1].text + '|' + name[2].text + '\n')
    except NoSuchElementException:
        # Page layout not as expected: log the entity as unmatched and move on.
        f.write(key + '\n')
f.close()
driver.close()
| [
"noreply@github.com"
] | noreply@github.com |
2cdf3790675f53dfcfc293bb402fb2fcc834e53d | 4f423b324b4c13a94dfd1efd936eed813cd7a1f8 | /flask_app.py | 1672993aa202d8d6197f53c314a2c8b817698e98 | [] | no_license | ecogit-stage/ReviewScapperDeployment | a61c2a98bab3c2c8abb10b1ded35e34dbd7e1cc5 | a45dc7564c4f0690022fc40ba5838bd413647e4f | refs/heads/main | 2023-08-25T20:09:04.676156 | 2021-10-27T09:42:01 | 2021-10-27T09:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,955 | py | # doing necessary imports
from flask import Flask, render_template, request,jsonify
# from flask_cors import CORS,cross_origin
import requests
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen as uReq
import pymongo
app = Flask(__name__) # initialising the flask app with the name 'app'
# The route below handles the base url ("/"), i.e. http://localhost:8080/
# (see app.run at the bottom of the file for the actual port).
@app.route('/',methods=['POST','GET']) # route with allowed methods as POST and GET
def index():
    """Home page. GET renders the search form; POST serves cached reviews from
    MongoDB and, on a cache miss, scrapes them from Flipkart and stores them."""
    if request.method == 'POST':
        searchString = request.form['content'].replace(" ","") # obtaining the search string entered in the form
        try:
            dbConn = pymongo.MongoClient("mongodb://localhost:27017/") # opening a connection to Mongo
            db = dbConn['crawlerDB'] # connecting to the database called crawlerDB
            reviews = db[searchString].find({}) # searching the collection with the name same as the keyword
            # NOTE(review): Cursor.count() was removed in PyMongo 4.x --
            # confirm the pinned pymongo version or switch to count_documents.
            if reviews.count() > 0: # if there is a collection with searched keyword and it has records in it
                return render_template('results.html',reviews=reviews) # show the results to user
            else:
                flipkart_url = "https://www.flipkart.com/search?q=" + searchString # preparing the URL to search the product on flipkart
                uClient = uReq(flipkart_url) # requesting the webpage from the internet
                flipkartPage = uClient.read() # reading the webpage
                uClient.close() # closing the connection to the web server
                flipkart_html = bs(flipkartPage, "html.parser") # parsing the webpage as HTML
                bigboxes = flipkart_html.findAll("div", {"class": "_2pi5LC col-12-12"}) # searching for appropriate tag to redirect to the product link
                del bigboxes[0:3] # the first 3 members of the list do not contain relevant information, hence deleting them.
                box = bigboxes[0] # taking the first iteration (for demo)
                productLink = "https://www.flipkart.com" + box.div.div.div.a['href'] # extracting the actual product link
                prodRes = requests.get(productLink) # getting the product page from server
                prod_html = bs(prodRes.text, "html.parser") # parsing the product page as HTML
                commentboxes = prod_html.find_all('div', {'class': "_16PBlm"}) # finding the HTML section containing the customer comments
                table = db[searchString] # creating a collection with the same name as search string. Tables and Collections are analogous.
                #filename = searchString+".csv" # filename to save the details
                #fw = open(filename, "w") # creating a local file to save the details
                #headers = "Product, Customer Name, Rating, Heading, Comment \n" # providing the heading of the columns
                #fw.write(headers) # writing first the headers to file
                reviews = [] # initializing an empty list for reviews
                # iterating over the comment section to get the details of customer and their comments
                # (each field falls back to a placeholder when its tag is missing)
                for commentbox in commentboxes:
                    try:
                        name = commentbox.div.div.find_all('p', {'class': '_2sc7ZR _2V5EHH'})[0].text
                    except:
                        name = 'No Name'
                    try:
                        rating = commentbox.div.div.div.div.text
                    except:
                        rating = 'No Rating'
                    try:
                        commentHead = commentbox.div.div.div.p.text
                    except:
                        commentHead = 'No Comment Heading'
                    try:
                        comtag = commentbox.div.div.find_all('div', {'class': ''})
                        custComment = comtag[0].div.text
                    except:
                        custComment = 'No Customer Comment'
                    #fw.write(searchString+","+name.replace(",", ":")+","+rating + "," + commentHead.replace(",", ":") + "," + custComment.replace(",", ":") + "\n")
                    mydict = {"Product": searchString, "Name": name, "Rating": rating, "CommentHead": commentHead,
                              "Comment": custComment} # saving that detail to a dictionary
                    x = table.insert_one(mydict) #inserting the dictionary containing the review comments to the collection
                    reviews.append(mydict) # appending the comments to the review list
                return render_template('results.html', reviews=reviews) # showing the review to the user
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt); narrowing to `except Exception` and logging the
        # traceback would aid debugging.
        except:
            return 'something is wrong'
            #return render_template('results.html')
    else:
        return render_template('index.html')
if __name__ == "__main__":
    app.run(port=8080,debug=True) # running the app on the local machine on port 8080 (debug mode; not for production)
"noreply@github.com"
] | noreply@github.com |
1a9627a465aa1f53187fe367e69589eff0cf6a31 | a59d1faced9fe7348ca7143d2a8643e0ebad2132 | /pyvisdk/do/application_quiesce_fault.py | cf32f58e3304afc4abc1a5f0d39e25a272834537 | [
"MIT"
] | permissive | Infinidat/pyvisdk | c55d0e363131a8f35d2b0e6faa3294c191dba964 | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | refs/heads/master | 2023-05-27T08:19:12.439645 | 2014-07-20T11:49:16 | 2014-07-20T11:49:16 | 4,072,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ApplicationQuiesceFault(vim, *args, **kwargs):
    '''This fault is thrown when creating a quiesced snapshot failed because the
    (user-supplied) custom pre-freeze script in the virtual machine exited with a
    non-zero return code.This indicates that the script failed to perform its
    quiescing task, which causes us to fail the quiesced snapshot operation.

    :param vim: connected service instance whose client.factory builds the
        underlying SOAP object for the '{urn:vim25}ApplicationQuiesceFault' type.
    :param args: positional values for the required properties, applied in
        declaration order: dynamicProperty, dynamicType, faultCause, faultMessage.
    :param kwargs: required/optional properties supplied by name.
    :raises IndexError: if fewer than the 4 required values are supplied in total.
    :raises InvalidArgumentError: if a keyword is not a known property name.
    '''
    obj = vim.client.factory.create('{urn:vim25}ApplicationQuiesceFault')
    # do some validation checking...
    # BUGFIX: the message previously claimed "at least 5" and counted only the
    # positional args, while the check (and the required list below) needs 4
    # values; report the same total the condition tests.
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
    required = [ 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
    optional = [ ]
    # Positional values map onto the property names in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    # Keyword values are validated against the known property names.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
| [
"guy@rzn.co.il"
] | guy@rzn.co.il |
31642fa7ef14844e7529c37bd4b42f313d0a69bc | 32f7392217c50e1ee5a41db0414cbd6ca2427753 | /Tencent2020/txbase/emb.py | 2bf25c681898d4882c0639e0dc5cc6a532c10b48 | [] | no_license | Stella2019/KDD2020 | 0f315cd14c26bbcedc69b3982ca58d848d5d4a13 | 2604208d8bcac47ef097e6469633430637149b31 | refs/heads/main | 2023-07-02T02:22:07.707798 | 2021-08-14T06:15:04 | 2021-08-14T06:15:04 | 395,909,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,577 | py | from . import Cache
import numpy as np
class EmbBatchLoader:
    """Batch-loads per-column word-embedding dicts from cache files and turns
    them into embedding matrices aligned with a label-encoding vocabulary
    (key2index), ready to initialize NN embedding layers."""
    def __init__(self,
                 all_emb_cols,
                 emb_base_dir=None,
                 key2index=None,
                 outer_emb=False):
        """
        :param all_emb_cols: list of column names to load embeddings for.
        :param emb_base_dir: base directory passed to Cache.reload_cache.
        :param key2index: per-column mapping {col: {token: index}} produced by
            your own label encoding (indices start at 1).
        :param outer_emb: set True when importing external embeddings; then
            key2index and word_emb_dict may not share the same vocabulary, and
            historically the largest key was used to represent rare words.
        """
        self.all_emb_cols = all_emb_cols
        # Kept so the public getters can restore the list after a load drops
        # columns whose cache files were missing.
        self.all_emb_cols_backup = all_emb_cols
        self.emb_base_dir = emb_base_dir
        self.key2index = key2index
        self.outer_emb = outer_emb
    def _get_max_index(self, word_emb_dict):
        '''
        Deprecated/unused: OOV tokens were originally mapped to the max index;
        that was later changed to '-1'.
        :param word_emb_dict:
        :return: the largest integer key, as a string.
        '''
        return str(sorted(map(int, list(word_emb_dict.keys())))[-1])
    def get_emb_matrix(self, word_emb_dict, key2index_col): # modify by zlh
        """
        prepare embedding for NN
        initializing the embedding... id => emb vectors
        the id is your own label encoding mapping...which stored in the self.key2index[col]
        """
        if self.outer_emb:
            # self._get_max_index(word_emb_dict) # legacy scheme: "max" key stood for rare words
            key_to_represent_rare = '-1'
        else:
            key_to_represent_rare = '-1' # current scheme: '-1' stands for rare words
        # Grab any one vector just to read the embedding dimensionality.
        for _, k in word_emb_dict.items():
            break
        emb_size = k.shape[0]
        voc_size = len(key2index_col)
        # The real vocabulary is encoded starting at 1 (the same encoding used
        # when preparing input sequences, see GetSeqFeas.ipynb): 100 words are
        # coded 1-100, hence the +1 when sizing the matrix.
        emb_matrix = np.zeros((voc_size + 1, emb_size)) # row 0 stays zero (padding); could it be the mean instead?
        # The embedding dict must contain a '-1' entry (the rare-word vector).
        if '-1' not in word_emb_dict.keys():
            # No '-1' in the embeddings => they cover the full vocabulary; we
            # must build the rare-word vector ourselves: take the keys present
            # in the embeddings but absent from the vocabulary...
            set_drop_words = list(
                set(word_emb_dict.keys()).difference(set(
                    key2index_col.keys())))
            if len(set_drop_words) > 0:
                # ...and average their vectors to get the OOV embedding.
                vector_low_frequency_words = np.zeros((emb_size, ))
                for w in set_drop_words:
                    vector_low_frequency_words += word_emb_dict[w]
                vector_low_frequency_words = vector_low_frequency_words / len(
                    set_drop_words)
                # Register the computed OOV vector under '-1'.
                word_emb_dict['-1'] = vector_low_frequency_words
                print(' file has ' + str(len(set_drop_words)) + \
                      ' low frequency words and fill vector as:', vector_low_frequency_words)
        for k, idx in key2index_col.items():
            try:
                emb_matrix[idx, :] = word_emb_dict[k]
            except KeyError: # k missing from word_emb_dict: fall back to the rare-word vector
                # print('find oov:',(k, idx))
                emb_matrix[idx, :] = word_emb_dict[key_to_represent_rare]
        emb_matrix = np.float32(emb_matrix)
        return emb_matrix
    def load_batch_embedding(self, emb_base_name, pure_nm):
        """
        Batch-load embeddings: one cached word_emb_dict per column in
        self.all_emb_cols; columns whose cache file is missing are dropped
        from self.all_emb_cols.
        """
        emb_dict = {}
        for col in self.all_emb_cols:
            file_nm = F'{emb_base_name}_{col}'
            try:
                emb_dict[col] = Cache.reload_cache(
                    file_nm=file_nm,
                    pure_nm=pure_nm,
                    base_dir=self.emb_base_dir)['word_emb_dict']
            except FileNotFoundError as e:
                print("[Error]" + " = =" * 30)
                print("ErrorMessage: ", e)
                print("col: ", col)
                print("file_nm:", file_nm)
                print("[Error]" + " = =" * 30)
        print(f"Raw self.all_emb_cols: {self.all_emb_cols}")
        self.all_emb_cols = list(emb_dict.keys())
        print(f"Updated self.all_emb_cols: {self.all_emb_cols}")
        return emb_dict
    def load_emb_dict_with_raw_embs(self,
                                    marker=None,
                                    emb_base_name=None,
                                    sentence_id='user_id',
                                    pure_nm=True):
        """Resolve the cache base name from marker/sentence_id (unless
        emb_base_name is given explicitly) and batch-load the raw embedding
        dicts."""
        if emb_base_name is None:
            if marker is None:
                raise ValueError(
                    "marker can't be None if emb_base_name is None!!")
            else:
                if marker.endswith("_advertiser_id") or marker.endswith(
                        "_user_id"):
                    # marker already carries the sentence_id suffix (only
                    # _advertiser_id and _user_id exist at present)
                    emb_base_name = F'EMB_DICT_{marker}'
                else:
                    # marker lacks the sentence_id suffix: append it
                    emb_base_name = F'EMB_DICT_{marker}_{sentence_id}'
        else:
            emb_base_name = emb_base_name.rstrip('_') # one consistent name for the whole embedding group
        emb_dict_with_raw_embs = self.load_batch_embedding(
            emb_base_name, pure_nm)
        return emb_dict_with_raw_embs
    def get_batch_emb_matrix(self,
                             marker=None,
                             emb_base_name=None,
                             sentence_id='user_id',
                             pure_nm=True):
        """Load the raw embedding dicts and convert each into an embedding
        matrix aligned with self.key2index; returns {col: matrix}."""
        emb_dict_with_raw_embs = self.load_emb_dict_with_raw_embs(
            marker=marker,
            emb_base_name=emb_base_name,
            sentence_id=sentence_id,
            pure_nm=pure_nm)
        emb_matrix_ready_dict = {}
        for col in self.all_emb_cols:
            emb_matrix_ready_dict[col] = self.get_emb_matrix(
                emb_dict_with_raw_embs[col], key2index_col=self.key2index[col])
        print("-" * 6)
        print("Done!")
        # restore all_emb_cols to all_emb_cols_backup
        self.all_emb_cols = self.all_emb_cols_backup
        return emb_matrix_ready_dict
    def get_batch_emb_matrix_by_absolute_path(self,
                                              absolute_path_with_placeholder):
        """Same as get_batch_emb_matrix, but each column's cache file is
        located by formatting the column name into
        *absolute_path_with_placeholder* (a '{}' format string)."""
        emb_matrix_ready_dict = {}
        for col in self.all_emb_cols:
            path = absolute_path_with_placeholder.format(col)
            try:
                i_raw_embs = Cache.reload_cache(
                    file_nm=path, base_dir=self.emb_base_dir)['word_emb_dict']
                emb_matrix_ready_dict[col] = self.get_emb_matrix(
                    i_raw_embs, key2index_col=self.key2index[col])
            except FileNotFoundError as e:
                print("[Error]" + " = =" * 30)
                print("ErrorMessage: ", e)
                print("col: ", col)
                print("file_nm:", path)
                print("[Error]" + " = =" * 30)
        print(f"Raw self.all_emb_cols: {self.all_emb_cols}")
        self.all_emb_cols = list(emb_matrix_ready_dict.keys())
        print(f"Updated self.all_emb_cols: {self.all_emb_cols}")
        print("-" * 6)
        print("Done!")
        # restore all_emb_cols to all_emb_cols_backup
        self.all_emb_cols = self.all_emb_cols_backup
        return emb_matrix_ready_dict
| [
"noreply@github.com"
] | noreply@github.com |
e7b126798e6ab0dbd7122c45284758c024e3909c | 75b977558dbaa2de55797085e1d8b6bc703bd168 | /Re-ID/reid/datasets/veri.py | 9c13af1d6e30fb592aee54784ae4114deed2d6de | [] | no_license | XrosLiang/VehicleX | f8966eda1687d722830391a619261df5eedd6e71 | 24e0017176724ebb789135a57b9d089595707e2c | refs/heads/master | 2022-12-09T01:44:30.360054 | 2020-08-30T14:06:05 | 2020-08-30T14:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,103 | py | from __future__ import print_function, absolute_import
import os.path as osp
import re
from glob import glob
import random
class VeRi(object):
def __init__(self, root, real = True, synthetic = True): # sys_image_by_test_baseline_multidistractor_AIC_1211_domain_transfer(Veri)_fix_intensity/
train_dir = './data/VeRi/image_train/'
sys_dir = './data/VeRi/VeRi_ReID_Simulation/'
query_dir = './data/VeRi/image_query/'
gallery_dir = './data/VeRi/image_test/'
self.train_path = osp.expanduser(train_dir)
self.gallery_path = osp.expanduser(gallery_dir)
self.query_path = osp.expanduser(query_dir)
self.sys_path = osp.expanduser(sys_dir)
self.real = real
self.synthetic = synthetic
self.train, self.query, self.gallery = [], [], []
self.num_train_ids, self.num_query_ids, self.num_gallery_ids = 0, 0, 0
self.load()
def preprocess(self, path, real = True, synthetic = False):
pattern = re.compile(r'(\d+)_c(\d+)')
all_pids = {}
ret = []
if real:
fpaths = sorted(glob(osp.join(path, '*.jpg')))
for fpath in fpaths:
fname = osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
# if cam > 15: continue
if pid == -1: continue
if pid not in all_pids:
all_pids[pid] = len(all_pids)
pid = all_pids[pid]
ret.append((fname, pid, cam))
if synthetic:
fpaths = sorted(glob(osp.join(self.sys_path, '*.jpg')))
random.shuffle(fpaths)
if not real:
fpaths = fpaths[:45338]
else:
fpaths = fpaths[:int(len(ret) * 1 )]
for fpath in fpaths:
fname = "../" + self.sys_path.split('/')[-2] + "/" + osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
pid = -pid
if pid == -1: continue
if pid not in all_pids:
all_pids[pid] = len(all_pids)
pid = all_pids[pid]
ret.append((fname, pid, cam))
return ret, int(len(all_pids))
def load(self):
print ("real path:", self.train_path, "synthetic path:", self.sys_path)
self.train, self.num_train_ids = self.preprocess(self.train_path, self.real, self.synthetic)
self.gallery, self.num_gallery_ids = self.preprocess(self.gallery_path)
self.query, self.num_query_ids = self.preprocess(self.query_path)
print(self.__class__.__name__, "dataset loaded")
print(" subset | # ids | # images")
print(" ---------------------------")
print(" train | {:5d} | {:8d}"
.format(self.num_train_ids, len(self.train)))
print(" query | {:5d} | {:8d}"
.format(self.num_query_ids, len(self.query)))
print(" gallery | {:5d} | {:8d}"
.format(self.num_gallery_ids, len(self.gallery)))
| [
"u6014942@anu.edu.au"
] | u6014942@anu.edu.au |
4c98e08132aeae3e18d23763c7ba5bf9f7915f22 | 3970706a16be81a63b2476222c1b061da9f11b70 | /estimator/download_data.py | 4cf480e781e68353a149d1325da327b6ec2ae348 | [] | no_license | sfujiwara/tensorflow-examples | 3de3fb90c6204bec2c455f8f1b9aa98a14f393b9 | 6b9dd3ba27e1b0d021c322f5504e888b6b7ed4fb | refs/heads/master | 2023-04-18T11:33:43.271751 | 2020-12-17T20:49:57 | 2020-12-17T20:49:57 | 126,787,804 | 1 | 0 | null | 2023-03-25T00:25:33 | 2018-03-26T07:06:44 | Python | UTF-8 | Python | false | false | 426 | py | import argparse
import tensorflow_datasets as tfds
parser = argparse.ArgumentParser()
parser.add_argument('--tfds_dir', type=str)
parser.add_argument('--dataset_name', type=str)
args = parser.parse_args()
TFDS_DIR = args.tfds_dir
DATASET_NAME = args.dataset_name
def main():
builder = tfds.builder(DATASET_NAME, data_dir=TFDS_DIR)
builder.download_and_prepare()
return
if __name__ == '__main__':
main()
| [
"shuhei.fujiwara@gmail.com"
] | shuhei.fujiwara@gmail.com |
8834a7f5c7e8320536a2a64882a2005412818bbd | a3b946f37561e96c0f3615fe373df3c58d697599 | /django_storytime/urls.py | 2b8b137fd78deb6b9c4dde847a935f6d9d98828e | [] | no_license | story-time-team/storytime-drf | c5d8c5b1d2e7384b56bf364b9713fc75d168096b | f934c3da90460a702f3b7d36e5903e45532a0968 | refs/heads/main | 2023-02-16T04:02:58.814300 | 2020-12-28T19:56:05 | 2020-12-28T19:56:05 | 325,104,058 | 0 | 0 | null | 2020-12-29T22:01:43 | 2020-12-28T19:54:47 | Python | UTF-8 | Python | false | false | 885 | py | """django_storytime URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth', include('rest_framework.urls', namespace='rest_framework')),
path('', include('users.urls'))
]
| [
"ashveerbhayroo@gmail.com"
] | ashveerbhayroo@gmail.com |
4b8eff8148ed0ac19a6ac1759eb66417d0b8a4a0 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part010263.py | 46c4f1fd19764edd4977285229bf635d77cfbf13 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher51281(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i3.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i3.2.1.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher51281._instance is None:
CommutativeMatcher51281._instance = CommutativeMatcher51281()
return CommutativeMatcher51281._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 51280
return
yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
666b782608b8d5c6c2de7e612156d9619e0281fa | 2aa817f40517f7c9c55534948df9d6d1742a8da8 | /core/management/commands/init_admin.py | 5b53553984751d2945c0f24b8caf065e4ff59e64 | [] | no_license | arischow/buzzbird | 2b4d24130e18c4b03be5c287c56b60efa142a4ee | 3ffcc849f9360037fb4aff5f2d3370a2ba145fcc | refs/heads/master | 2020-07-14T15:37:09.620648 | 2019-08-12T05:58:36 | 2019-08-12T05:58:36 | 152,390,942 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
admin = User.objects.filter(username='arischow').first()
if admin:
self.stdout.write('Overlord arischow existed.')
else:
User.objects.create_superuser(username='arischow',
email='arischow@gmail.com',
password='cnspurs8633')
self.stdout.write('Admin created.')
| [
"arischow@gmail.com"
] | arischow@gmail.com |
d3a61e297c89395471dd9579fe85c7371d2b0d99 | 6873ec680d2b77164e0bb4e9d8daafed7ca995df | /devel/lib/python2.7/dist-packages/msgs_demo/msg/_MoveBaseAction.py | 4149ebb0cf0f73b368eb57e31361fe5ce52bb825 | [] | no_license | wzq-hwx/uptest | 45f60d0e77a59794b4ee061c6275bc2a2ea11ae2 | 577fdf4300edcce4c0ecc6a3d55433fcc319527b | refs/heads/master | 2020-04-25T21:17:43.145310 | 2019-02-28T08:48:26 | 2019-02-28T08:48:26 | 173,076,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,107 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from msgs_demo/MoveBaseAction.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import genpy
import actionlib_msgs.msg
import msgs_demo.msg
import std_msgs.msg
class MoveBaseAction(genpy.Message):
_md5sum = "70b6aca7c7f7746d8d1609ad94c80bb8"
_type = "msgs_demo/MoveBaseAction"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
MoveBaseActionGoal action_goal
MoveBaseActionResult action_result
MoveBaseActionFeedback action_feedback
================================================================================
MSG: msgs_demo/MoveBaseActionGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalID goal_id
MoveBaseGoal goal
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: msgs_demo/MoveBaseGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
geometry_msgs/PoseStamped target_pose
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: msgs_demo/MoveBaseActionResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
MoveBaseResult result
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: msgs_demo/MoveBaseResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
================================================================================
MSG: msgs_demo/MoveBaseActionFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
MoveBaseFeedback feedback
================================================================================
MSG: msgs_demo/MoveBaseFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
geometry_msgs/PoseStamped base_position
"""
__slots__ = ['action_goal','action_result','action_feedback']
_slot_types = ['msgs_demo/MoveBaseActionGoal','msgs_demo/MoveBaseActionResult','msgs_demo/MoveBaseActionFeedback']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
action_goal,action_result,action_feedback
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MoveBaseAction, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.action_goal is None:
self.action_goal = msgs_demo.msg.MoveBaseActionGoal()
if self.action_result is None:
self.action_result = msgs_demo.msg.MoveBaseActionResult()
if self.action_feedback is None:
self.action_feedback = msgs_demo.msg.MoveBaseActionFeedback()
else:
self.action_goal = msgs_demo.msg.MoveBaseActionGoal()
self.action_result = msgs_demo.msg.MoveBaseActionResult()
self.action_feedback = msgs_demo.msg.MoveBaseActionFeedback()
  def _get_types(self):
    """
    internal API method

    Returns the list of ROS type strings for this message's fields
    (parallel to ``__slots__``), as used by genpy's generic
    (de)serialization machinery.
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``

    Wire layout (little-endian): action_goal, action_result and
    action_feedback are written back-to-back.  Each string field is a
    uint32 length prefix followed by its utf-8 bytes; fixed-size runs
    of numeric fields are packed with precompiled struct formats.
    """
    try:
      # -- action_goal.header: seq + stamp (3x uint32), then frame_id string --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
      _x = self.action_goal.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_goal.goal_id: stamp (2x uint32) + id string --
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
      _x = self.action_goal.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_goal.goal.target_pose.header --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_goal.goal.target_pose.header.seq, _x.action_goal.goal.target_pose.header.stamp.secs, _x.action_goal.goal.target_pose.header.stamp.nsecs))
      _x = self.action_goal.goal.target_pose.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- target_pose pose (7 doubles) fused with action_result.header
      #    (3x uint32) into a single 7d3I pack for fewer write calls --
      _x = self
      buff.write(_get_struct_7d3I().pack(_x.action_goal.goal.target_pose.pose.position.x, _x.action_goal.goal.target_pose.pose.position.y, _x.action_goal.goal.target_pose.pose.position.z, _x.action_goal.goal.target_pose.pose.orientation.x, _x.action_goal.goal.target_pose.pose.orientation.y, _x.action_goal.goal.target_pose.pose.orientation.z, _x.action_goal.goal.target_pose.pose.orientation.w, _x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
      _x = self.action_result.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_result.status: goal_id (stamp + id), status byte, text --
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
      _x = self.action_result.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.action_result.status.status))
      _x = self.action_result.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_feedback.header --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
      _x = self.action_feedback.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_feedback.status: goal_id (stamp + id), status byte, text --
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
      _x = self.action_feedback.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.action_feedback.status.status))
      _x = self.action_feedback.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_feedback.feedback.base_position: header + pose (7 doubles) --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_feedback.feedback.base_position.header.seq, _x.action_feedback.feedback.base_position.header.stamp.secs, _x.action_feedback.feedback.base_position.header.stamp.nsecs))
      _x = self.action_feedback.feedback.base_position.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_7d().pack(_x.action_feedback.feedback.base_position.pose.position.x, _x.action_feedback.feedback.base_position.pose.position.y, _x.action_feedback.feedback.base_position.pose.position.z, _x.action_feedback.feedback.base_position.pose.orientation.x, _x.action_feedback.feedback.base_position.pose.orientation.y, _x.action_feedback.feedback.base_position.pose.orientation.z, _x.action_feedback.feedback.base_position.pose.orientation.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    Walks the buffer with a running [start, end) window, mirroring the
    exact field order written by :meth:`serialize`.  Raises
    ``genpy.DeserializationError`` on a truncated/underfilled buffer.
    """
    try:
      # Ensure sub-message containers exist before assigning into them.
      if self.action_goal is None:
        self.action_goal = msgs_demo.msg.MoveBaseActionGoal()
      if self.action_result is None:
        self.action_result = msgs_demo.msg.MoveBaseActionResult()
      if self.action_feedback is None:
        self.action_feedback = msgs_demo.msg.MoveBaseActionFeedback()
      end = 0
      # -- action_goal.header: seq + stamp (12 bytes), then frame_id string --
      _x = self
      start = end
      end += 12
      (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_goal.header.frame_id = str[start:end]
      # -- action_goal.goal_id: stamp (8 bytes) + id string --
      _x = self
      start = end
      end += 8
      (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_goal.goal_id.id = str[start:end]
      # -- action_goal.goal.target_pose.header --
      _x = self
      start = end
      end += 12
      (_x.action_goal.goal.target_pose.header.seq, _x.action_goal.goal.target_pose.header.stamp.secs, _x.action_goal.goal.target_pose.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.goal.target_pose.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_goal.goal.target_pose.header.frame_id = str[start:end]
      # -- target_pose pose (7 doubles) fused with action_result.header
      #    (3x uint32): 68 bytes decoded in one unpack --
      _x = self
      start = end
      end += 68
      (_x.action_goal.goal.target_pose.pose.position.x, _x.action_goal.goal.target_pose.pose.position.y, _x.action_goal.goal.target_pose.pose.position.z, _x.action_goal.goal.target_pose.pose.orientation.x, _x.action_goal.goal.target_pose.pose.orientation.y, _x.action_goal.goal.target_pose.pose.orientation.z, _x.action_goal.goal.target_pose.pose.orientation.w, _x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_result.header.frame_id = str[start:end]
      # -- action_result.status: goal_id (stamp + id), status byte, text --
      _x = self
      start = end
      end += 8
      (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_result.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.action_result.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.status.text = str[start:end].decode('utf-8')
      else:
        self.action_result.status.text = str[start:end]
      # -- action_feedback.header --
      _x = self
      start = end
      end += 12
      (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.header.frame_id = str[start:end]
      # -- action_feedback.status: goal_id (stamp + id), status byte, text --
      _x = self
      start = end
      end += 8
      (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.action_feedback.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.status.text = str[start:end].decode('utf-8')
      else:
        self.action_feedback.status.text = str[start:end]
      # -- action_feedback.feedback.base_position: header + pose (56 bytes) --
      _x = self
      start = end
      end += 12
      (_x.action_feedback.feedback.base_position.header.seq, _x.action_feedback.feedback.base_position.header.stamp.secs, _x.action_feedback.feedback.base_position.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.feedback.base_position.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.feedback.base_position.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 56
      (_x.action_feedback.feedback.base_position.pose.position.x, _x.action_feedback.feedback.base_position.pose.position.y, _x.action_feedback.feedback.base_position.pose.position.z, _x.action_feedback.feedback.base_position.pose.orientation.x, _x.action_feedback.feedback.base_position.pose.orientation.y, _x.action_feedback.feedback.base_position.pose.orientation.z, _x.action_feedback.feedback.base_position.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module

    Same wire layout as :meth:`serialize`; the ``numpy`` module hook is
    unused here because this message has no array-typed fields.
    """
    try:
      # -- action_goal.header: seq + stamp (3x uint32), then frame_id string --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
      _x = self.action_goal.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_goal.goal_id: stamp (2x uint32) + id string --
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
      _x = self.action_goal.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_goal.goal.target_pose.header --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_goal.goal.target_pose.header.seq, _x.action_goal.goal.target_pose.header.stamp.secs, _x.action_goal.goal.target_pose.header.stamp.nsecs))
      _x = self.action_goal.goal.target_pose.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- target_pose pose (7 doubles) fused with action_result.header --
      _x = self
      buff.write(_get_struct_7d3I().pack(_x.action_goal.goal.target_pose.pose.position.x, _x.action_goal.goal.target_pose.pose.position.y, _x.action_goal.goal.target_pose.pose.position.z, _x.action_goal.goal.target_pose.pose.orientation.x, _x.action_goal.goal.target_pose.pose.orientation.y, _x.action_goal.goal.target_pose.pose.orientation.z, _x.action_goal.goal.target_pose.pose.orientation.w, _x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
      _x = self.action_result.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_result.status: goal_id (stamp + id), status byte, text --
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
      _x = self.action_result.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.action_result.status.status))
      _x = self.action_result.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_feedback.header --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
      _x = self.action_feedback.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_feedback.status: goal_id (stamp + id), status byte, text --
      _x = self
      buff.write(_get_struct_2I().pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
      _x = self.action_feedback.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.action_feedback.status.status))
      _x = self.action_feedback.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # -- action_feedback.feedback.base_position: header + pose (7 doubles) --
      _x = self
      buff.write(_get_struct_3I().pack(_x.action_feedback.feedback.base_position.header.seq, _x.action_feedback.feedback.base_position.header.stamp.secs, _x.action_feedback.feedback.base_position.header.stamp.nsecs))
      _x = self.action_feedback.feedback.base_position.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_7d().pack(_x.action_feedback.feedback.base_position.pose.position.x, _x.action_feedback.feedback.base_position.pose.position.y, _x.action_feedback.feedback.base_position.pose.position.z, _x.action_feedback.feedback.base_position.pose.orientation.x, _x.action_feedback.feedback.base_position.pose.orientation.y, _x.action_feedback.feedback.base_position.pose.orientation.z, _x.action_feedback.feedback.base_position.pose.orientation.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module

    Generator-produced (genpy) code: the fields are consumed strictly in wire
    order via a moving ``start``/``end`` window.  NOTE: the parameter is named
    ``str`` and shadows the builtin inside this method; ``numpy`` is accepted
    for API symmetry but unused here because this message has no array fields.
    """
    try:
      # Lazily create sub-messages so partially-initialized instances can be filled in.
      if self.action_goal is None:
        self.action_goal = msgs_demo.msg.MoveBaseActionGoal()
      if self.action_result is None:
        self.action_result = msgs_demo.msg.MoveBaseActionResult()
      if self.action_feedback is None:
        self.action_feedback = msgs_demo.msg.MoveBaseActionFeedback()
      end = 0
      # --- action_goal.header ---
      _x = self
      start = end
      end += 12
      (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      # Strings are serialized as a uint32 length followed by the raw bytes.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_goal.header.frame_id = str[start:end]
      # --- action_goal.goal_id ---
      _x = self
      start = end
      end += 8
      (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_goal.goal_id.id = str[start:end]
      # --- action_goal.goal.target_pose ---
      _x = self
      start = end
      end += 12
      (_x.action_goal.goal.target_pose.header.seq, _x.action_goal.goal.target_pose.header.stamp.secs, _x.action_goal.goal.target_pose.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_goal.goal.target_pose.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_goal.goal.target_pose.header.frame_id = str[start:end]
      # 68 bytes = 7 float64 (goal pose) + 3 uint32 (action_result header), unpacked in one call.
      _x = self
      start = end
      end += 68
      (_x.action_goal.goal.target_pose.pose.position.x, _x.action_goal.goal.target_pose.pose.position.y, _x.action_goal.goal.target_pose.pose.position.z, _x.action_goal.goal.target_pose.pose.orientation.x, _x.action_goal.goal.target_pose.pose.orientation.y, _x.action_goal.goal.target_pose.pose.orientation.z, _x.action_goal.goal.target_pose.pose.orientation.w, _x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_result.header.frame_id = str[start:end]
      # --- action_result.status ---
      _x = self
      start = end
      end += 8
      (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_result.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.action_result.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_result.status.text = str[start:end].decode('utf-8')
      else:
        self.action_result.status.text = str[start:end]
      # --- action_feedback.header ---
      _x = self
      start = end
      end += 12
      (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.header.frame_id = str[start:end]
      # --- action_feedback.status ---
      _x = self
      start = end
      end += 8
      (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.action_feedback.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.status.text = str[start:end].decode('utf-8')
      else:
        self.action_feedback.status.text = str[start:end]
      # --- action_feedback.feedback.base_position ---
      _x = self
      start = end
      end += 12
      (_x.action_feedback.feedback.base_position.header.seq, _x.action_feedback.feedback.base_position.header.stamp.secs, _x.action_feedback.feedback.base_position.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.action_feedback.feedback.base_position.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.action_feedback.feedback.base_position.header.frame_id = str[start:end]
      # 56 bytes = 7 float64 for the base_position pose (position xyz + quaternion xyzw).
      _x = self
      start = end
      end += 56
      (_x.action_feedback.feedback.base_position.pose.position.x, _x.action_feedback.feedback.base_position.pose.position.y, _x.action_feedback.feedback.base_position.pose.position.z, _x.action_feedback.feedback.base_position.pose.orientation.x, _x.action_feedback.feedback.base_position.pose.orientation.y, _x.action_feedback.feedback.base_position.pose.orientation.z, _x.action_feedback.feedback.base_position.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# genpy ships a shared Struct("<I") instance, so no lazy creation is needed here;
# the accessor exists only for symmetry with the _get_struct_* helpers below.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_7d = None
def _get_struct_7d():
global _struct_7d
if _struct_7d is None:
_struct_7d = struct.Struct("<7d")
return _struct_7d
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_7d3I = None
def _get_struct_7d3I():
global _struct_7d3I
if _struct_7d3I is None:
_struct_7d3I = struct.Struct("<7d3I")
return _struct_7d3I
| [
"yowings@gmail.com"
] | yowings@gmail.com |
b0397665744d2c47d95e8a3f5beacc33a6f18cc2 | b92d59b1d78276c2f642b640fbb495fa85e222c9 | /debugger_tools/sitepackages_libs/pygments/filter.py | 809ae3f46745bed427298a40af3b4e9c7a53ca16 | [] | no_license | tin2tin/weed | d69d27ed9fb0273d0bbcbcf6941d9d9bfd4bbb44 | dade41a9f6e82a493d4817d53a5af3dcdf31f21c | refs/heads/master | 2020-12-27T06:21:09.047047 | 2020-02-02T18:03:15 | 2020-02-02T18:03:15 | 237,793,836 | 0 | 0 | null | 2020-02-02T15:43:25 | 2020-02-02T15:43:25 | null | UTF-8 | Python | false | false | 2,145 | py | # -*- coding: utf-8 -*-
"""
pygments.filter
~~~~~~~~~~~~~~~
Module that implements the default filter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def apply_filters(stream, filters, lexer=None):
    """Chain an iterable of *filters* over *stream*.

    Each filter's ``filter(lexer, tokens)`` is wrapped in a generator so the
    call happens lazily, only when the returned iterable is consumed.  If no
    *lexer* is given the filters receive ``None``.
    """
    def _chain(flt, tokens):
        # Defer the flt.filter() call until iteration, mirroring the lazy
        # generator-based chaining of the original implementation.
        for tok in flt.filter(lexer, tokens):
            yield tok

    result = stream
    for flt in filters:
        result = _chain(flt, result)
    return result
def simplefilter(f):
    """Turn a token-transforming function into a filter class.

    The decorated function must accept ``(lexer, stream, options)`` and yield
    ``(ttype, value)`` pairs; the result is a :class:`FunctionFilter` subclass
    named after the function::

        @simplefilter
        def lowercase(lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    namespace = {
        'function': f,
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__,
    }
    return type(f.__name__, (FunctionFilter,), namespace)
class Filter(object):
    """Base class for token-stream filters.

    Subclass this (or use the `simplefilter` decorator) and override
    :meth:`filter` to transform a token stream; keyword options passed at
    construction are stored on :attr:`options`.
    """

    def __init__(self, **options):
        self.options = options

    def filter(self, lexer, stream):
        # Subclass responsibility.
        raise NotImplementedError()
class FunctionFilter(Filter):
    """Abstract base used by `simplefilter`.

    Subclasses created on the fly by the decorator carry the wrapped callable
    in the ``function`` class attribute and delegate filtering to it.
    """
    function = None

    def __init__(self, **options):
        # Guard against direct use without a bound function.
        if not hasattr(self, 'function'):
            raise TypeError('%r used without bound function' %
                            self.__class__.__name__)
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # pylint: disable-msg=E1102
        # Unpack/re-yield pairs so malformed items fail exactly as before.
        for ttype, value in self.function(lexer, stream, self.options):
            yield ttype, value
| [
"cristian@blender.cl"
] | cristian@blender.cl |
3c4e7c48dd94e84539485d6169bb461643a6cde7 | f36e4d032a1f496c006517898659d9e8bb4a5acd | /tutorial 17.py | a8613324631158a3b04142c099ed460e7acff6d9 | [] | no_license | aman0864/CWH_1-Python3-Python-Game-Development | 477c2086de0b53160db798817bcf078d89edde80 | 1ac89070ad1f0c878cd8ff3d80b23d43f9aa9fe2 | refs/heads/main | 2023-04-13T14:36:49.021020 | 2021-05-03T11:49:42 | 2021-05-03T11:49:42 | 363,911,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,386 | py | # Date 18-04-2021
import pygame as pg
import random as rd
# Initialize all pygame modules before any font/display use.
pg.init()
#! user's choice
fps = 20  # frames per second cap for the main loop
font = pg.font.Font('freesansbold.ttf', 32)
color_of_text_to_be_display_on_screen = (233, 54, 100)
x_position_of_text_to_be_display_on_screen = 0
y_position_of_text_to_be_display_on_screen = 0
# Minimum distance (pixels) that spawned food keeps from the screen edge.
minimum_range_of_food_from_screen = 150
def food_placement(axis_value_parameter_mimimum, axis_value_parameter_maximum):
    """Populate the module-level food-coordinate lists.

    Every multiple of 15 in ``range(minimum, maximum - minimum + 1)`` is
    appended to ``screen_width_list_for_food`` when *maximum* equals
    ``screen_width``, or to ``screen_height_list_for_food`` when it equals
    ``screen_height`` (keeping the food away from both screen edges).
    """
    stop = axis_value_parameter_maximum - axis_value_parameter_mimimum + 1
    for candidate in range(axis_value_parameter_mimimum, stop):
        if candidate % 15 != 0:
            continue
        if axis_value_parameter_maximum == screen_width:
            screen_width_list_for_food.append(candidate)
        elif axis_value_parameter_maximum == screen_height:
            screen_height_list_for_food.append(candidate)
def displaying_text_on_game_terminal(text, color, x, y):
    """Render *text* in *color* using the module font and blit it at (x, y)."""
    rendered_surface = font.render(text, True, color)
    game_window.blit(rendered_surface, [x, y])
def snake_deployeing(game_window, snake_color, snake_incrementation_list, snake_size_width, snake_size_height):
    """Draw one rectangle per (x, y) body segment in snake_incrementation_list."""
    for segment_x, segment_y in snake_incrementation_list:
        segment_rect = [segment_x, segment_y, snake_size_width, snake_size_height]
        pg.draw.rect(game_window, snake_color, segment_rect)
# if snake_food_position_x > screen_width:
#     snake_food_position_x -= screen_width
# elif snake_food_position_y > screen_height:
#     snake_food_position_y -= screen_height
# our game colors
screen_background_color = (13, 4, 84)
# creating game window
screen_width = 1920
screen_height = 1080
game_window = pg.display.set_mode((screen_width, screen_height))
# Setting caption of our game window
pg.display.set_caption("Snake Game by Aman")
# todo pg.display.update()
# game specific variables
game_exit = False
game_over = False
clock = pg.time.Clock()
# snake classification
snake_color = (56, 255, 4)
snake_position_x = 300
snake_position_y = 300
snake_size_width = 15
snake_size_height = 15
snake_velocity_x = 0
snake_velocity_y = 0
snake_user_given_velocity = 20
snake_incrementation_list = []
snake_length = 1
# food making for snake
snake_food_color = (246, 200, 40)
snake_food_size_x = 15
snake_food_size_y = 15
screen_width_list_for_food = list()
screen_height_list_for_food = list()
food_placement(minimum_range_of_food_from_screen, screen_width)
food_placement(minimum_range_of_food_from_screen, screen_height)
# NOTE(review): the x coordinate is drawn from the *height* list and y from the
# *width* list — this looks swapped; confirm intent before changing.
snake_food_position_x = rd.choice(screen_height_list_for_food)
snake_food_position_y = rd.choice(screen_width_list_for_food)
# Wrap the food back on-screen if the chosen value exceeds the window bounds.
if snake_food_position_x > screen_width:
    snake_food_position_x -= screen_width
elif snake_food_position_y > screen_height:
    snake_food_position_y -= screen_height
# making a variable for score
score = 0
# Main game loop: handle input, move the snake, detect food, redraw.
while not game_exit:
    for event in pg.event.get():
        if event.type == pg.QUIT:
            game_exit = True
        if event.type == pg.KEYDOWN:
            # Arrow keys set the velocity axis; the other axis is zeroed.
            if event.key == pg.K_RIGHT:
                snake_velocity_y = 0
                snake_velocity_x = snake_user_given_velocity
            elif event.key == pg.K_LEFT:
                snake_velocity_y = 0
                snake_velocity_x = -snake_user_given_velocity
            elif event.key == pg.K_DOWN:
                snake_velocity_x = 0
                snake_velocity_y = snake_user_given_velocity
            elif event.key == pg.K_UP:
                snake_velocity_x = 0
                snake_velocity_y = -snake_user_given_velocity
    snake_position_x += snake_velocity_x
    snake_position_y += snake_velocity_y
    # Food "eaten" when the head is within 15px of the food on both axes.
    if abs(snake_position_x-snake_food_position_x) < 15 and abs(snake_position_y-snake_food_position_y) < 15:
        score += 1
        # print(f"Score:{score}")
        snake_food_position_x = rd.choice(screen_height_list_for_food)
        snake_food_position_y = rd.choice(screen_width_list_for_food)
        if snake_food_position_x > screen_width:
            snake_food_position_x -= screen_width
        elif snake_food_position_y > screen_height:
            snake_food_position_y -= screen_height
        pg.display.set_caption(f"Snake Game by Aman! Your Score is {score}")
        snake_length += 20
        pg.display.update()
    game_window.fill(screen_background_color)
    # pg.display.flip()
    # pg.draw.rect(game_window, snake_color, [
    #     snake_position_x, snake_position_y, snake_size_width, snake_size_height])
    pg.draw.rect(game_window, snake_food_color, [
        snake_food_position_x, snake_food_position_y, snake_food_size_x, snake_food_size_y])
    displaying_text_on_game_terminal(f"Score: {score}", color_of_text_to_be_display_on_screen,
                                     x_position_of_text_to_be_display_on_screen, y_position_of_text_to_be_display_on_screen)
    displaying_text_on_game_terminal(f"Snake's food x position: {snake_food_position_x}", color_of_text_to_be_display_on_screen,
                                     x_position_of_text_to_be_display_on_screen, y_position_of_text_to_be_display_on_screen+50)
    displaying_text_on_game_terminal(f"Snake's food y position: {snake_food_position_y}", color_of_text_to_be_display_on_screen,
                                     x_position_of_text_to_be_display_on_screen, y_position_of_text_to_be_display_on_screen + 100)
    # Append the new head; trim the tail once the body exceeds snake_length.
    snake_head = list()
    snake_head.append(snake_position_x)
    snake_head.append(snake_position_y)
    snake_incrementation_list.append(snake_head)
    if len(snake_incrementation_list) > snake_length:
        del snake_incrementation_list[0]
    snake_deployeing(game_window, snake_color,
                     snake_incrementation_list, snake_size_width, snake_size_height)
    pg.display.update()
    clock.tick(fps)
pg.display.update()
"noreply@github.com"
] | noreply@github.com |
6ec1dac28a2dbeb63c8f23d39bb3f862eb5c9205 | 422a0dc42e53052a4d716488b1a43338653383cb | /Sorting and Searching/Sorting_concepts/counting_sort.py | 8882e4c9fe23ffe49ceaafed6e2f7fbe5688c628 | [] | no_license | shiraz-30/Sorting-and-Searching | c2203e1218513aa10631770158a9eb6eeaf99e29 | d50abb95dbdb18421017aba8cc867b3715cb13d9 | refs/heads/main | 2023-05-11T08:25:14.463642 | 2021-05-27T15:24:55 | 2021-05-27T15:24:55 | 371,416,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | def counting_sort(arr):
# in this implementation we will assume that we get only positive values
max_element = max(arr)
freq = [0]*(max_element + 1)
# this loop will help to create the frequency list
for i in range(0, len(arr)):
freq[arr[i]] += 1
# now we need to prepare prefix sum
for i in range(1, len(freq)):
freq[i] = freq[i] + freq[1 - 1]
output = [0]*len(arr)
for i in range(len(arr) - 1, -1, -1): # going from previous
output[freq[arr[i]] - 1] = arr[i]
freq[arr[i]] -= 1
return output
# Read one line of whitespace-separated integers from stdin and print them sorted.
li = list(map(int, input().split()))
output = counting_sort(li)
print(output)
"""
T.C. -> O(n + K), θ(n + K), Ω(n + K)
S.C. -> O(n + K)
No. of comparisons -> No
No. of swaps -> No
In place -> No
Stability -> Stable
"""
| [
"noreply@github.com"
] | noreply@github.com |
0680b3f3ad147e9f975b450578e76f7ad9b63a6a | 70e29fc402560430eaa76de188ddb223ed543430 | /crawler.py | 53fa72a38561c8f5479da7fa787837fbb53ae008 | [] | no_license | yujin113/RoadMap | 3fde85b41ce8bbd76e160a01d5dbd1d014451765 | c147003f8b317ce2b35f1330f2175ae66919d372 | refs/heads/main | 2023-06-24T19:50:33.351144 | 2021-07-28T10:05:50 | 2021-07-28T10:05:50 | 384,694,697 | 0 | 0 | null | 2021-07-10T12:39:00 | 2021-07-10T12:39:00 | null | UTF-8 | Python | false | false | 5,334 | py | import enum
import os
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from datetime import date, timedelta
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.chrome.options import Options
# Configure Django before importing any app models — this script runs standalone.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', "RoadmapProject.settings")
import django
django.setup()
from info.models import Recruit
# Naver credentials are kept out of the repo in a local idpw module.
from idpw import naverid, naverpw
def fetch_recruit_latest_data():
    """Scrape Saramin's "hot 100" job board via Selenium and return a list of
    dicts with keys corp/title/end_date/link/career/academic/type/area.

    Logs in through Naver SSO using credentials from the idpw module.
    NOTE(review): XPaths, CSS selectors, and the date-string slicing below are
    tightly coupled to Saramin's current markup — verify before reuse.
    """
    driver = webdriver.Chrome('/Users/yujin/sookmyung/chromedriver')
    driver.get('https://www.saramin.co.kr/zf_user/jobs/hot100')
    driver.implicitly_wait(2)
    # Open the Naver login flow and authenticate.
    driver.find_element_by_xpath('//*[@id="sri_header"]/div[1]/div[2]/a[1]/span').click()
    driver.implicitly_wait(2)
    driver.find_element_by_name('id').send_keys(naverid)
    driver.find_element_by_name('password').send_keys(naverpw)
    driver.find_element_by_xpath('//*[@id="login_frm"]/div[2]/div[1]/div[2]/button').click()
    driver.implicitly_wait(2)
    driver.find_element_by_xpath('//*[@id="search_panel_wrapper"]/form/fieldset/div/div[1]/div/div[1]/ul/li[4]/button').click()
    sleep(3)
    html = BeautifulSoup(driver.page_source, 'html.parser')
    banners = html.select('#content > div.recruit_hot_wrap > div.recruit_hot_list > div.careers > div > ul > li')
    result=[]
    link_root = "https://www.saramin.co.kr"
    for i, banner in enumerate(banners):
        # The first <li> is a header row, not a job posting.
        if i == 0:
            continue
        corp = banner.select_one('div.area_rank > div.rank_company_info > a > span').text
        title = banner.select_one('div.area_detail > a.tit > span').text
        dt = banner.select_one('div.area_detail > dl > dd').text
        link = link_root + banner.select_one('div.area_detail > a.tit')["href"]
        career = banner.select_one('div.area_detail > div > span:nth-child(1)').text
        academic = banner.select_one('div.area_detail > div > span:nth-child(2)').text
        # With 4 spans the 3rd is the employment type; otherwise only area exists.
        # NOTE(review): when the first processed banner takes the else-branch,
        # `type` is never assigned here and still refers to the builtin (or a
        # previous iteration's value) when stored below — confirm and fix upstream.
        if banner.select_one('div.area_detail > div > span:nth-child(4)') :
            type = banner.select_one('div.area_detail > div > span:nth-child(3)').text
            area = banner.select_one('div.area_detail > div > span:nth-child(4)').text
        else :
            area = banner.select_one('div.area_detail > div > span:nth-child(3)').text
        # Year rollover at year-end is not handled yet...?
        if "D" in dt:
            # NOTE(review): only dt[2] is read, so "D-10" and beyond parse as a
            # single digit — confirm the site's D-day format.
            num = dt[2]
            end_date = date.today() + timedelta(int(num))
            end_date = end_date.strftime("%Y-%m-%d")
        elif "상시" in dt:
            end_date = "상시채용"
        elif "채용시" in dt:
            end_date = "채용시"
        elif "오늘마감" in dt:
            end_date = date.today()
            end_date = end_date.strftime("%Y-%m-%d")
        elif "내일마감" in dt:
            end_date = date.today() + timedelta(1)
            end_date = end_date.strftime("%Y-%m-%d")
        # Explicit "~ M/D(" style deadlines; branches cover the four digit-width
        # combinations. NOTE(review): dt[6]/dt[7] can raise IndexError for short
        # strings that pass the len > 5 check — verify against real data.
        if len(dt) > 5:
            if dt[5] == "(":
                #~ 8/2(
                end_date = str(date.today().year) + "-0" + dt[2] + "-0" + dt[4]
            elif dt[6] == "(" and dt[3] == "/":
                #~ 8/12(
                end_date = str(date.today().year) + "-0" + dt[2] + "-" + dt[4:6]
            elif dt[6] == "(" and dt[4] == "/":
                #~ 12/1(
                end_date = str(date.today().year) + "-" + dt[2:4] + "-0" + dt[5]
            elif dt[7] == "(":
                #~ 12/12(
                end_date = str(date.today().year) + "-" + dt[2:4] + "-" + dt[5:7]
        item_obj = {
            'corp': corp,
            'title': title,
            'end_date': end_date,
            'link': link,
            'career': career,
            'academic': academic,
            'type': type,
            'area': area
        }
        # print(item_obj)
        # print()
        result.append(item_obj)
    driver.close()
    return result
def add_new_items(crawled_items):
    """Persist newly crawled postings as Recruit rows.

    Expired rows (end_date before today) are purged first; an item whose link
    already exists is skipped.  Dash-formatted deadlines go into ``end_date``,
    anything else (e.g. "상시채용") into ``end_date_str``.
    """
    # Recruit.objects.all().delete()
    Recruit.objects.filter(end_date__lt=date.today()).delete()  # drop expired rows
    # How should open-ended ("상시채용") postings get refreshed...?
    for item in crawled_items:
        try:
            Recruit.objects.get(link=item['link'])  # skip duplicates by link
        except Recruit.DoesNotExist:
            shared_fields = dict(
                corp=item['corp'],
                title=item['title'],
                link=item['link'],
                career=item['career'],
                academic=item['academic'],
                type=item['type'],
                area=item['area'],
            )
            if "-" in item['end_date']:
                Recruit(end_date=item['end_date'], **shared_fields).save()
            else:
                Recruit(end_date_str=item['end_date'], **shared_fields).save()
if __name__ == '__main__':
add_new_items(fetch_recruit_latest_data())
| [
"claire9585@sookmyung.ac.kr"
] | claire9585@sookmyung.ac.kr |
63f33a87835b8770a6f52247450c589c170448cc | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/kusto/v20210827/get_data_connection.py | 10c402e25115ff6f6d20ff26cd07edd7efef74c5 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,654 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'GetDataConnectionResult',
    'AwaitableGetDataConnectionResult',
    'get_data_connection',
    'get_data_connection_output',
]

# Fires at import time: this whole module is deprecated in favor of the typed variants.
warnings.warn("""Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""", DeprecationWarning)
# Generated Pulumi SDK code: @pulumi.output_type introspects this class's
# getters, so names like `id`/`type` (builtin shadows) and the `__self__`
# convention come from the generator and must be preserved.
@pulumi.output_type
class GetDataConnectionResult:
    """
    Class representing an data connection.
    """
    def __init__(__self__, id=None, kind=None, location=None, name=None, type=None):
        # Each argument is validated (when truthy) and stored via pulumi.set
        # so the @pulumi.getter properties below can retrieve it.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        Kind of the endpoint for the data connection
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetDataConnectionResult(GetDataConnectionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator function so
        # instances satisfy the awaitable protocol; awaiting simply returns a
        # plain GetDataConnectionResult copy of this object's fields.
        if False:
            yield self
        return GetDataConnectionResult(
            id=self.id,
            kind=self.kind,
            location=self.location,
            name=self.name,
            type=self.type)
def get_data_connection(cluster_name: Optional[str] = None,
                        data_connection_name: Optional[str] = None,
                        database_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataConnectionResult:
    """
    Class representing an data connection.


    :param str cluster_name: The name of the Kusto cluster.
    :param str data_connection_name: The name of the data connection.
    :param str database_name: The name of the database in the Kusto cluster.
    :param str resource_group_name: The name of the resource group containing the Kusto cluster.
    """
    # Runtime deprecation notice (in addition to the import-time warning above).
    pulumi.log.warn("""get_data_connection is deprecated: Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""")
    # Map Python snake_case arguments to the provider's camelCase invoke args.
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['dataConnectionName'] = data_connection_name
    __args__['databaseName'] = database_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the result is wrapped in the awaitable type below.
    __ret__ = pulumi.runtime.invoke('azure-native:kusto/v20210827:getDataConnection', __args__, opts=opts, typ=GetDataConnectionResult).value

    return AwaitableGetDataConnectionResult(
        id=__ret__.id,
        kind=__ret__.kind,
        location=__ret__.location,
        name=__ret__.name,
        type=__ret__.type)
# lift_output_func generates the real implementation by lifting
# get_data_connection into pulumi.Output space; the `...` body is intentional.
@_utilities.lift_output_func(get_data_connection)
def get_data_connection_output(cluster_name: Optional[pulumi.Input[str]] = None,
                               data_connection_name: Optional[pulumi.Input[str]] = None,
                               database_name: Optional[pulumi.Input[str]] = None,
                               resource_group_name: Optional[pulumi.Input[str]] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataConnectionResult]:
    """
    Class representing an data connection.


    :param str cluster_name: The name of the Kusto cluster.
    :param str data_connection_name: The name of the data connection.
    :param str database_name: The name of the database in the Kusto cluster.
    :param str resource_group_name: The name of the resource group containing the Kusto cluster.
    """
    pulumi.log.warn("""get_data_connection is deprecated: Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""")
    ...
| [
"noreply@github.com"
] | noreply@github.com |
65ca33b94667b1d1345a91a0f683703829234a7c | b4c5c90d37ec3258cc4facd2116385a113e385b7 | /che_homework.py | 03e9a5f6160c86bdfdd5ca7e22c2bbd5c102d773 | [] | no_license | ahoetker/ghscripts | e964f0acb06c416f0912a76c0d3aa5a70431a1e2 | 8a953fdc53e29b87ee6c0421f7dca27acc9768c4 | refs/heads/master | 2021-05-09T02:27:06.768710 | 2019-01-05T03:38:32 | 2019-01-05T03:38:32 | 119,209,687 | 0 | 0 | null | 2018-03-28T19:31:58 | 2018-01-27T22:57:30 | Python | UTF-8 | Python | false | false | 1,854 | py | #!/usr/bin/env python3
from pathlib import Path
import sys
unitstring_code = r"""function displayString = unitString(quantity, name)
%UNITSTRING Display a 1x2 sym with symbolic units
% USAGE: unitString(some_quantity, name)
% OUTPUT:
% - displayString: char vector containing name, scalar, and units
if nargin < 2
n = inputname(1);
else
n = name;
end
[s, U] = separateUnits(quantity);
formatSpec = '%s: %s %s';
displayString = sprintf(formatSpec, n, num2str(double(s)), symunit2str(U));
end"""
preamble_code = r"""% Math stuff
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
% Fancy references
\usepackage{varioref}
\labelformat{equation}{equation~(#1)}
% XeLaTeX Fonts
\usepackage{unicode-math}
\setmainfont{STIX2Text-Regular.otf}
\setmathfont{STIX2Math.otf}
% Hyperlinks
\usepackage{color}
\definecolor{mygrey}{gray}{0.25}
\usepackage[pdfborder=0, colorlinks=true, urlcolor=blue, linkcolor=mygrey]{hyperref}
% Page layout
\usepackage[margin=1in]{geometry}
% Header definition
\usepackage{fancyhdr}
\pagestyle{fancy}
\lhead{Andrew Hoetker - ASUID 1207233777}
\rhead{CHE 334 Homework 12}"""
def create_header(assn_name="ASSN NAME", due_date="DUE DATE", class_text="CLASS TEXT"):
    """Build the MATLAB header comment line for a homework assignment.

    Bug fix: the original ignored all three parameters and always returned the
    literal placeholder ``"%% ASSN NAME"``; the assignment name is now
    interpolated.  The defaults preserve the original output for calls with no
    arguments, so existing behavior is unchanged there.

    :param assn_name: assignment title placed in the header line
    :param due_date: accepted for future use; not rendered yet  # TODO
    :param class_text: accepted for future use; not rendered yet  # TODO
    :return: MATLAB section-header comment, e.g. ``"%% Homework 12"``
    """
    header_text = "%% " + assn_name
    return header_text
def create_homework():
    """Create a homework directory named after argv[1] and populate it with
    header.m, unitString.m and preamble.tex from the module templates."""
    homework_name = str(sys.argv[1])
    homework_dir = Path(homework_name)
    homework_dir.mkdir()
    files_to_write = (
        ("header.m", create_header(assn_name=homework_name)),
        ("unitString.m", unitstring_code),
        ("preamble.tex", preamble_code),
    )
    for filename, contents in files_to_write:
        with (homework_dir / filename).open("w") as out:
            out.write(contents)
if __name__ == "__main__":
create_homework()
| [
"ahoetker@me.com"
] | ahoetker@me.com |
416fe9fc3c5308c62242cbdd1683c5aca460fdec | 4dfe5d0b0854228a671f094dbdbe4e10ac1e0206 | /finick/forefront.py | eb8d4c033b2a4459503e402ab1e055d7f0a77786 | [] | no_license | cedrus-opensource/finick_code_reviews | 35933f086fcbca8aa4367b34a5c23382320fc2e3 | 38ca4ec629e08b774c83ade478d9f2ff62e06c2f | refs/heads/master | 2020-04-29T09:13:29.201840 | 2015-10-22T22:32:16 | 2015-10-22T22:32:16 | 35,689,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | #!/usr/bin/env python
from __future__ import print_function
from __future__ import division # py3 style. division promotes to floating point.
from __future__ import unicode_literals
from __future__ import absolute_import
| [
"kkheller@cedrus.com"
] | kkheller@cedrus.com |
a9bc844909b2b692321a5de50217b83eb922f2c2 | 06deaa602f3aa2caf78a7c0686f5ed4b1505abed | /packet_count.py | cf403499c375131ae02845700515f1fbcc967e72 | [] | no_license | vksh224/d-DSA-simulator | 3961f7f71eac361b44a13a02e7d0b3b62ac7269f | 27dc2782ad5bd90efd8fdc37397d002878a3c690 | refs/heads/master | 2020-07-10T06:15:24.448517 | 2019-08-24T17:27:59 | 2019-08-24T17:27:59 | 204,189,894 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py |
# Summarize per-file packet counts over 100 generated-message files and print
# the max/min/average number of 300 MB packets per file.
path = "Generated_Messages/generated_messages"
# NOTE: `max`/`min` shadow the builtins for the rest of this script.
max = 0
min = 99999
avg = 0
for i in range(100):
    file = path + str(i) + ".txt"
    with open(file) as f:
        lines = f.readlines()[1:]  # skip the header line
    total_mb = 0
    for line in lines:
        line_arr = line.strip().split()
        # Column 4 is treated as a size in MB; a value of 60 is counted as 300.
        # NOTE(review): the 60 -> 300 substitution is domain-specific — confirm
        # against the message-generator's format.
        if int(line_arr[4]) == 60:
            total_mb += 300
        else:
            total_mb += int(line_arr[4])
    packets = total_mb/300  # packets of 300 MB each (fractional allowed)
    avg += packets
    if packets > max:
        max = packets
    if packets < min:
        min = packets
print("Max:", max, "Min:", min, "AVG:", avg/100)
| [
"noreply@github.com"
] | noreply@github.com |
98cb85d1402933244f795a346bdc4fd0313236fe | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/scrapy_scrapy/scrapy-master/scrapy/commands/startproject.py | 5941066326a89f8907da69a7681f54c726320d4d | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,863 | py | from __future__ import print_function
import re
import os
import string
from importlib import import_module
from os.path import join, exists, abspath
from shutil import ignore_patterns, move, copy2, copystat
import scrapy
from scrapy.commands import ScrapyCommand
from scrapy.utils.template import render_templatefile, string_camelcase
from scrapy.exceptions import UsageError
# Path components (relative to the project dir) of template files that get
# ${project_name} substitution after the template tree is copied.
TEMPLATES_TO_RENDER = (
    ('scrapy.cfg',),
    ('${project_name}', 'settings.py.tmpl'),
    ('${project_name}', 'items.py.tmpl'),
    ('${project_name}', 'pipelines.py.tmpl'),
    ('${project_name}', 'middlewares.py.tmpl'),
)

# Files never copied from the template tree.
IGNORE = ignore_patterns('*.pyc', '.svn')
class Command(ScrapyCommand):
    """`scrapy startproject` command: copy the project template tree into a
    target directory and render the ${project_name} placeholders."""

    requires_project = False
    default_settings = {'LOG_ENABLED': False}

    def syntax(self):
        return "<project_name> [project_dir]"

    def short_desc(self):
        return "Create new project"

    def _is_valid_name(self, project_name):
        # Rejects names that are not valid Python identifiers or that would
        # shadow an importable module; prints the reason and returns False.
        def _module_exists(module_name):
            try:
                import_module(module_name)
                return True
            except ImportError:
                return False

        if not re.search(r'^[_a-zA-Z]\w*$', project_name):
            print('Error: Project names must begin with a letter and contain'\
                  ' only\nletters, numbers and underscores')
        elif _module_exists(project_name):
            print('Error: Module %r already exists' % project_name)
        else:
            return True
        return False

    def _copytree(self, src, dst):
        """
        Since the original function always creates the directory, to resolve
        the issue a new function had to be created. It's a simple copy and
        was reduced for this case.

        More info at:
        https://github.com/scrapy/scrapy/pull/2005
        """
        ignore = IGNORE
        names = os.listdir(src)
        ignored_names = ignore(src, names)

        if not os.path.exists(dst):
            os.makedirs(dst)

        for name in names:
            if name in ignored_names:
                continue

            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            if os.path.isdir(srcname):
                self._copytree(srcname, dstname)
            else:
                copy2(srcname, dstname)
        copystat(src, dst)

    def run(self, args, opts):
        if len(args) not in (1, 2):
            raise UsageError()

        project_name = args[0]
        project_dir = args[0]

        if len(args) == 2:
            project_dir = args[1]

        # Refuse to overwrite an existing Scrapy project.
        if exists(join(project_dir, 'scrapy.cfg')):
            self.exitcode = 1
            print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))
            return

        if not self._is_valid_name(project_name):
            self.exitcode = 1
            return

        # Copy the template tree, rename the placeholder package directory,
        # then substitute ${project_name} inside the template files.
        self._copytree(self.templates_dir, abspath(project_dir))
        move(join(project_dir, 'module'), join(project_dir, project_name))
        for paths in TEMPLATES_TO_RENDER:
            path = join(*paths)
            tplfile = join(project_dir,
                string.Template(path).substitute(project_name=project_name))
            render_templatefile(tplfile, project_name=project_name,
                ProjectName=string_camelcase(project_name))
        print("New Scrapy project %r, using template directory %r, created in:" % \
              (project_name, self.templates_dir))
        print("    %s\n" % abspath(project_dir))
        print("You can start your first spider with:")
        print("    cd %s" % project_dir)
        print("    scrapy genspider example example.com")

    @property
    def templates_dir(self):
        # TEMPLATES_DIR setting overrides the templates bundled with scrapy.
        _templates_base_dir = self.settings['TEMPLATES_DIR'] or \
            join(scrapy.__path__[0], 'templates')
        return join(_templates_base_dir, 'project')
| [
"659338505@qq.com"
] | 659338505@qq.com |
5c36c60d11dde9fbccd0c50a805bae829cce3134 | cf431b103b6481a8ec3002277fbd1e4c8fc02223 | /tldrdict/update.py | 0b9859ca0f023a1a8d5209e7a1504e71e2bde05e | [
"MIT"
] | permissive | stjordanis/tldr-flutter | e418930bc9404aa230643a75093306aad141ff76 | 59683860a7eae5bca0d14fa5cc10a1477db5a7fd | refs/heads/master | 2023-04-30T12:37:05.765644 | 2021-05-19T18:31:08 | 2021-05-19T18:31:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | import json
from os import path
import json
import requests
def get_dict():
    """Fetch the tldr.sh assets index and map each command name to its
    first listed platform.

    Returns:
        dict: command name -> platform string.
    """
    r = requests.get(url="https://tldr.sh/assets/")
    data = json.loads(r.text)
    commands = {}
    # Iterate the command entries directly instead of indexing by position.
    for command in data["commands"]:
        commands[command["name"]] = command["platform"][0]
    return commands
def update_commands_file():
    """Run scheduled job to update commands file.

    Fetches the command->platform mapping and writes it as JSON to
    static/commands.txt, reporting success or failure on stdout.
    """
    data = json.dumps(get_dict())
    try:
        # Context manager closes the file even if the write fails
        # (original opened/closed manually and leaked on error).
        with open('static/commands.txt', 'wt') as file:
            file.write(data)
        print("Commands added")
    except OSError:
        # Narrowed from a bare `except:` (which also hid a stray no-op
        # `print` statement) so unrelated bugs are no longer swallowed.
        print("Unable to write to file")
if __name__ == '__main__':
update_commands_file()
| [
"nitinnirve@gmail.com"
] | nitinnirve@gmail.com |
ced912642f1ce175762a59d39ca23a5b70a8c4c3 | c70dacbe730ac1aeb1aea5ebb4676b8af8cba863 | /8 Functions/8-13 User Profile.py | 46aaa7a4ee869a4921e833fa07b492de8b1f2c14 | [] | no_license | KotaCanchela/PythonCrashCourse | 41c80024863c6428216b28227e2a25566222e266 | 81bbfd6d2e9bdf087ea2f4401d4f489e98d5023a | refs/heads/master | 2022-11-29T09:10:40.841500 | 2020-08-05T16:00:31 | 2020-08-05T16:00:31 | 282,297,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # Build a profile of yourself by calling build_profile(),
# using your first and last names and three other key-value pairs that describe you
def build_profile(first, last, **kwargs):
    """Return a profile dict: title-cased names plus any extra key-value pairs."""
    kwargs['first_name'] = first.title()
    # Fixed key: 'last_name' (underscore) to match the 'first_name' style;
    # the original used 'last name' with a space.
    kwargs['last_name'] = last.title()
    return kwargs

user_profile = build_profile('dakota', 'canchela',
                             height="5'6", age=22,
                             location='Paris')
print(user_profile) | [
"dakotacanchela@gmail.com"
] | dakotacanchela@gmail.com |
a015fd2835d1017c32b4f5d5ad8ec3e72eb99d16 | a78f0d96c33d8e3399bffa85ffba5c8e598e8492 | /Array/55_sort_wave.py | 94cb0b44a844fba62865e284c91e58d6ea58cb23 | [] | no_license | ANKITPODDER2000/data-structure | 78203fabf9ea7ef580d41d4d44cbff1e6c9f397d | 3c1542562e74c0888718273e16206a755b193d4e | refs/heads/main | 2023-02-04T15:40:21.017573 | 2020-12-31T10:45:18 | 2020-12-31T10:45:18 | 325,778,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from get_array_helper import take_array_user
def sort_wave(arr , n):
    """Rearrange the first n items of arr in place into wave form:
    arr[0] >= arr[1] <= arr[2] >= arr[3] ...

    Bug fix: the boundary guards are now checked *before* indexing.  The
    original evaluated `arr[i+1]` before `i < n-1`, which raised IndexError
    at i == n-1 for every odd-length array.
    """
    for i in range(0 , n , 2):
        # Even positions must be local maxima; swap with smaller neighbours.
        if i > 0 and arr[i] < arr[i-1]:
            arr[i] , arr[i-1] = arr[i-1] , arr[i]
        if i < n-1 and arr[i] < arr[i+1]:
            arr[i] , arr[i+1] = arr[i+1] , arr[i]
# Demo driver: read an array (and its length) from the user, wave-sort it
# in place, then print the result.
arr , n = take_array_user()
print("Sorting in wave form .....")
sort_wave(arr , n)
print("Sorting done .....")
print("Array after sorting in wave form : ",arr)
| [
"ankitpodder0211@gmail.com"
] | ankitpodder0211@gmail.com |
1c69bff81bb225ea2dc065b837006a7c0491647f | 55cf4226203718c6adcedba16272b9b2d39ab6de | /hw/hw3/dqn-car/train_pytorch_docs.py | b8a052ad71193a37eb2f8cf59efdb4831eb5ff81 | [] | no_license | nikhilvsupekar/deep-rl | 0e299f88d2894cf8e375a59f2ba2c7c666616ac7 | fe2e72d90292d0309bcd838f95ccf16b6d186e08 | refs/heads/master | 2023-01-19T06:31:40.771757 | 2020-11-17T03:13:42 | 2020-11-17T03:13:42 | 296,128,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,698 | py | import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
Transition = namedtuple(
    'Transition',
    ('state', 'action', 'next_state', 'reward')
)


class ReplayMemory(object):
    """Cyclic buffer holding the most recent transitions for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0

    def push(self, *args):
        """Saves a transition, evicting the oldest one once the buffer is full."""
        entry = Transition(*args)
        if len(self.memory) == self.capacity:
            # Full: overwrite in place at the ring cursor.
            self.memory[self.position] = entry
        else:
            # Still filling: the cursor always points one past the end here.
            self.memory.append(entry)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return batch_size transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class DQN(nn.Module):
    """Convolutional Q-network: three conv+BatchNorm blocks followed by a
    linear head that maps the flattened features to one Q-value per action.

    h, w: input image height/width; outputs: number of actions.
    """
    def __init__(self, h, w, outputs):
        super(DQN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)
        # Number of Linear input connections depends on output of conv2d layers
        # and therefore the input image size, so compute it.
        def conv2d_size_out(size, kernel_size = 5, stride = 2):
            # Standard conv output-size formula (no padding, no dilation).
            return (size - (kernel_size - 1) - 1) // stride + 1
        convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
        convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
        linear_input_size = convw * convh * 32
        self.head = nn.Linear(linear_input_size, outputs)
    # Called with either one element to determine next action, or a batch
    # during optimization. Returns tensor([[left0exp,right0exp]...]).
    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        return self.head(x.view(x.size(0), -1))
# Preprocessing pipeline: tensor -> PIL image -> bicubic downscale to
# height 40 -> back to a float tensor.
resize = T.Compose([T.ToPILImage(),
                    T.Resize(40, interpolation=Image.CUBIC),
                    T.ToTensor()])
def get_cart_location(screen_width):
    """Map the cart's world x-coordinate to a horizontal pixel position.

    Relies on the module-level ``env``; env.state[0] is the cart position
    in world units, centered at 0.
    """
    world_width = env.x_threshold * 2
    pixels_per_world_unit = screen_width / world_width
    # Shift by half the screen so world x == 0 lands at the screen centre.
    return int(env.state[0] * pixels_per_world_unit + screen_width / 2.0)  # MIDDLE OF CART
def get_screen():
    """Render the gym screen and return a cropped, cart-centred, down-scaled
    float tensor with a leading batch dimension (BCHW) on ``device``."""
    # Returned screen requested by gym is 400x600x3, but is sometimes larger
    # such as 800x1200x3. Transpose it into torch order (CHW).
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))
    # Cart is in the lower half, so strip off the top and bottom of the screen
    _, screen_height, screen_width = screen.shape
    screen = screen[:, int(screen_height*0.4):int(screen_height * 0.8)]
    view_width = int(screen_width * 0.6)
    cart_location = get_cart_location(screen_width)
    # Choose a horizontal window around the cart, clamped to screen edges.
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return resize(screen).unsqueeze(0).to(device)
env = gym.make('CartPole-v0').unwrapped
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display
plt.ion()
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
env.reset()
# Show one example of the preprocessed observation.
plt.figure()
plt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),
           interpolation='none')
plt.title('Example extracted screen')
plt.show()
# Hyperparameters: replay-batch size, discount factor, epsilon-greedy decay
# schedule (start/end/decay rate), and target-network sync period (episodes).
BATCH_SIZE = 128
GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
TARGET_UPDATE = 10
# Get screen size so that we can initialize layers correctly based on shape
# returned from AI gym. Typical dimensions at this point are close to 3x40x90
# which is the result of a clamped and down-scaled render buffer in get_screen()
init_screen = get_screen()
_, _, screen_height, screen_width = init_screen.shape
# Get number of actions from gym action space
n_actions = env.action_space.n
# Two networks: policy_net is trained; target_net is a frozen copy used for
# bootstrapped targets and synced every TARGET_UPDATE episodes.
policy_net = DQN(screen_height, screen_width, n_actions).to(device)
target_net = DQN(screen_height, screen_width, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.RMSprop(policy_net.parameters())
memory = ReplayMemory(10000)
steps_done = 0
def select_action(state):
    """Pick an action epsilon-greedily; epsilon decays exponentially with
    the global step counter ``steps_done``."""
    global steps_done
    draw = random.random()
    decay = math.exp(-1. * steps_done / EPS_DECAY)
    eps_threshold = EPS_END + (EPS_START - EPS_END) * decay
    steps_done += 1
    if draw <= eps_threshold:
        # Explore: uniformly random action as a 1x1 long tensor.
        return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
    with torch.no_grad():
        # Exploit: argmax over the policy network's Q-values.
        # max(1) reduces over the action dimension; [1] is the argmax index.
        return policy_net(state).max(1)[1].view(1, 1)
# Per-episode survival times, appended at the end of each episode.
episode_durations = []
def plot_durations():
    """Plot episode durations (and a 100-episode running mean once enough
    episodes exist), refreshing the figure in scripts and notebooks alike."""
    plt.figure(2)
    plt.clf()
    durations_t = torch.tensor(episode_durations, dtype=torch.float)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())
    plt.pause(0.001)  # pause a bit so that plots are updated
    if is_ipython:
        display.clear_output(wait=True)
        display.display(plt.gcf())
def optimize_model():
    """One DQN optimization step on a replay batch.

    Samples BATCH_SIZE transitions, computes the Huber loss between
    Q(s_t, a) from policy_net and the bootstrapped target
    r + GAMMA * max_a' Q_target(s_{t+1}, a'), then back-propagates with
    per-parameter gradient clamping to [-1, 1].
    """
    # Not enough experience collected yet to form a full batch.
    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
    # detailed explanation). This converts batch-array of Transitions
    # to Transition of batch-arrays.
    batch = Transition(*zip(*transitions))
    # Compute a mask of non-final states and concatenate the batch elements
    # (a final state would've been the one after which simulation ended)
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                          batch.next_state)), device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state
                                                if s is not None])
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken. These are the actions which would've been taken
    # for each batch state according to policy_net
    state_action_values = policy_net(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states.
    # Expected values of actions for non_final_next_states are computed based
    # on the "older" target_net; selecting their best reward with max(1)[0].
    # This is merged based on the mask, such that we'll have either the expected
    # state value or 0 in case the state was final.
    next_state_values = torch.zeros(BATCH_SIZE, device=device)
    next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
    # Compute the expected Q values
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    for param in policy_net.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
# Main training loop: states are frame *differences* (current - last screen),
# which encode motion; the target network is refreshed every TARGET_UPDATE
# episodes.
num_episodes = 50
for i_episode in range(num_episodes):
    # Initialize the environment and state
    env.reset()
    last_screen = get_screen()
    current_screen = get_screen()
    state = current_screen - last_screen
    for t in count():
        # Select and perform an action
        action = select_action(state)
        _, reward, done, _ = env.step(action.item())
        reward = torch.tensor([reward], device=device)
        # Observe new state
        last_screen = current_screen
        current_screen = get_screen()
        if not done:
            next_state = current_screen - last_screen
        else:
            # Terminal transitions carry no successor state.
            next_state = None
        # Store the transition in memory
        memory.push(state, action, next_state, reward)
        # Move to the next state
        state = next_state
        # Perform one step of the optimization (on the target network)
        optimize_model()
        if done:
            episode_durations.append(t + 1)
            plot_durations()
            break
    # Update the target network, copying all weights and biases in DQN
    if i_episode % TARGET_UPDATE == 0:
        target_net.load_state_dict(policy_net.state_dict())
print('Complete')
env.render()
env.close()
plt.ioff()
plt.show()
| [
"nikhilvsupekar@gmail.com"
] | nikhilvsupekar@gmail.com |
04b1c38f198cef269dfcbb6e64d9d206b337bd6b | 14d0ab117566a725f110683dc177bcc3cf143641 | /Data_loader.py | e4f15f746b84f6a8dc116581869c788d9e2faac3 | [] | no_license | mzarejko/Fighter_bot | bc1fe3d174e5c9f880439b2bb1094e3cd545061c | a397802e53a841dbade92c2e89ef992c8809f866 | refs/heads/master | 2023-05-14T06:26:19.825920 | 2021-06-04T20:14:47 | 2021-06-04T20:14:47 | 288,153,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,124 | py | import Settings
from mss import mss
import cv2
import numpy as np
import pickle
import os
import cv2
import time
import shutil
import matplotlib.pyplot as plt
class Data_loader:
    '''
    class for saving, creating, loading data
    updating work base on Data_updater
    '''
    def __init__(self, label_idx=None):
        # label_idx: index of the active class in Settings.LABEL_CLASS;
        # None means this loader only captures frames and has no label.
        self.__name_screenshots = "sequence"
        self.__name_screenshots_folder = "screenshot"
        # One-hot label vector over the known classes.
        self.__label = [0 for _ in range(len(Settings.LABEL_CLASS.values()))]
        if label_idx != None:
            self.__label[label_idx] = 1
        else:
            self.__label = None
        # Delay between consecutive frames of a sequence (seconds).
        self.__seq_delay = 0.01
        # Delay before capturing the first frame of a sequence (seconds).
        self.__first_delay = 0.3
    # loop for finding last number of folder with image sequence
    def __get_next_folder(self, path):
        # Returns the directory (with trailing '/') for the next screenshot:
        # the highest-numbered existing folder if it still has room, else a
        # new folder numbered one higher.
        offset_folder = [0]
        for folder in os.listdir(path):
            num = int(folder.split('_')[-1])
            offset_folder.append(num)
        # if all dir empty then create first folder
        if not os.path.isdir(path+self.__name_screenshots_folder+'_'+str(max(offset_folder))):
            return path + self.__name_screenshots_folder + '_' + str(max(offset_folder))+ '/'
        # for checking if last folder is full
        if len(os.listdir(path+self.__name_screenshots_folder+'_'+str(max(offset_folder)))) == Settings.TIME_STEP:
            dir = path+self.__name_screenshots_folder+'_'+str(max(offset_folder)+1) +'/'
        else:
            dir = path + self.__name_screenshots_folder + '_' + str(max(offset_folder)) + '/'
        return dir
    def __get_next_file(self, path, type='png'):
        # Builds the next sequentially-numbered file path inside `path`,
        # e.g. "<path>sequence_7.png".
        offset_file = [-1]
        for file in os.listdir(path):
            num = int(file.split('_')[-1].split('.')[0])
            offset_file.append(num)
        return path + self.__name_screenshots+'_'+str(max(offset_file)+1)+'.'+type
    def save_images(self, data, path):
        # Writes every image of `data` into the next free slot under `path`,
        # creating the base and sequence folders as needed.
        for img in data:
            if not os.path.isdir(path):
                os.mkdir(path)
            folder = self.__get_next_folder(path)
            if not os.path.isdir(folder):
                os.mkdir(folder)
            dir = self.__get_next_file(folder)
            cv2.imwrite(dir, img)
    def picke_data(self, data, path):
        # Pickles `data` to the next numbered .pickle file under `path`.
        # (Method name keeps the original "picke" spelling for compatibility.)
        dir = self.__get_next_file(path, type='pickle')
        with open(dir, 'wb') as f:
            pickle.dump(data, f)
    def __get_img(self):
        # Grabs one screenshot via mss, resizes it per Settings, and crops
        # the region described by Settings.RESIZE_MONITOR_HP.
        with mss() as sct :
            time.sleep(self.__seq_delay)
            img = sct.grab(Settings.MONITOR)
            img = np.array(img)
            img = cv2.resize(img, (Settings.WIDTH_SCREEN_DATA, Settings.HEIGHT_SCREEN_DATA))
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
            img_bar = img[Settings.RESIZE_MONITOR_HP['top']:Settings.RESIZE_MONITOR_HP['height']+Settings.RESIZE_MONITOR_HP['top'],
            Settings.RESIZE_MONITOR_HP['left']:Settings.RESIZE_MONITOR_HP['width']+Settings.RESIZE_MONITOR_HP['left']]
            img_bar = cv2.cvtColor(img_bar, cv2.COLOR_BGRA2BGR)
            return img_bar
    def load_sequence(self):
        # Captures Settings.TIME_STEP frames and stacks them into one array.
        screen_data = []
        time.sleep(self.__first_delay)
        for step in range(Settings.TIME_STEP):
            img = self.__get_img()
            screen_data.append(img)
        return np.array(screen_data)
    def update_data(self, labels_path, features_path):
        # Captures one labelled sequence: images go to labels_path, the
        # one-hot label is pickled to features_path. Requires a label.
        if self.__label is None:
            raise Exception('idx label None!')
        #this delay is to reduce number of TIME STEPS
        screen_data = self.load_sequence()
        self.save_images(screen_data, labels_path)
        self.picke_data(self.__label, features_path)
        print(self.__label)
    def check_label_idx(self):
        # Returns the class key corresponding to the one-hot label.
        if self.__label is None:
            raise Exception('idx label None!')
        return list(Settings.LABEL_CLASS.keys())[np.argmax(self.__label)]
    def copy_folder(self, src, dst):
        shutil.copytree(str(src),str(dst))
    def copy_file(self, src, dst):
        shutil.copy(src,dst)
    def get_paths(self, path):
        # Sorted list of "<path><entry>" strings for every entry in `path`.
        paths = []
        for p in os.listdir(path):
            paths.append(path+p)
        return sorted(paths)
| [
"haker@vostro.home"
] | haker@vostro.home |
c1e59c62d04cb617cee1181bdc687ea5152a9f88 | 86133ba216191a0b63eeddd4669846c597ec68be | /python/lsst/iip/AuditWeb/messages/display/migrations/0002_auto_20170609_1537.py | 2e1f05d60c0dbcd7aa49a3970bbcb90f08e4a7e1 | [] | no_license | batmanuel-sandbox/ctrl_iip | a4898f830a3c007ba58ef755ee269f2b138ca7e8 | 9f6ab8b9e1da994b719c62c9390941b6176b01a6 | refs/heads/master | 2020-03-17T18:50:44.104639 | 2018-04-12T13:05:05 | 2018-04-12T13:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-09 15:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the job, job_pairs, measurement and
    session fields from the machines model."""
    dependencies = [
        ('display', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='machines',
            name='job',
        ),
        migrations.RemoveField(
            model_name='machines',
            name='job_pairs',
        ),
        migrations.RemoveField(
            model_name='machines',
            name='measurement',
        ),
        migrations.RemoveField(
            model_name='machines',
            name='session',
        ),
    ]
| [
"estherwuying@gmail.com"
] | estherwuying@gmail.com |
8e3433cc468d8d0c729fe477b522903a60d3acd2 | e27333261b8e579564016c71d2061cc33972a8b8 | /.history/api/UnigramLanguageModelImplementation_20210809170904.py | 6ea53ba81f55833e5414f6e86eea471894cdaf2c | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,153 | py | import math
from IPython.display import display
import sys
from BM25implementation import QueryParsers
# Interpolation weight for the document model vs. the collection model.
ALPHA = 0.75
# When True, scores are raw probabilities; when False, log-base-2 values.
NORMALIZE_PROBABILITY = True


class UnigramLanguageModel:
    """Unigram query-likelihood model with Jelinek-Mercer smoothing.

    Per-document and collection-wide unigram probabilities are interpolated
    with weight ALPHA; unseen words receive add-one smoothing.
    """

    def __init__(self, tweets_data): #tweets is a pandas dataframe
        self.tweets_data = tweets_data
        self.wordsCollectionFrequencyDictionary = self.create_words_frequency_dict(tweets_data)

    def create_words_frequency_dict(self, tweets_data, collection = True):
        """Count word occurrences.

        With collection=True, tweets_data is a DataFrame with a `clean_text`
        column; with collection=False it is an iterable of words.
        """
        word_frequency_dictionary = {}
        if collection:
            for sentence in tweets_data.clean_text.tolist():
                for word in sentence.split(" "):
                    word_frequency_dictionary[word] = word_frequency_dictionary.get(word, 0) + 1
        else:
            for word in tweets_data:
                word_frequency_dictionary[word] = word_frequency_dictionary.get(word, 0) + 1
        return word_frequency_dictionary

    def calculate_total_no_of_words(self, wordsCollectionFrequencyDictionary):
        """Total token count represented by a frequency dictionary."""
        return sum(wordsCollectionFrequencyDictionary.values())

    def calculate_unigram_probability(self, word: str, wordCollectionFrequencyDictionary):
        """Return P(word) under the given frequency dict, using a count of 1
        (add-one smoothing) for out-of-vocabulary words."""
        totalNumberOfWords = self.calculate_total_no_of_words(wordCollectionFrequencyDictionary)
        # .get(word, 1) replaces the old try/except KeyError plus debug prints.
        return wordCollectionFrequencyDictionary.get(word, 1) / totalNumberOfWords

    def calculate_interpolated_sentence_probability(self, querySentence:list, document, alpha=ALPHA, normalize_probability=NORMALIZE_PROBABILITY):
        """Product over query words of
        alpha * P(word | document) + (1 - alpha) * P(word | collection).

        Bug fix: a leftover debug `sys.exit()` (plus debug prints) made this
        method terminate the whole process before returning; both removed.
        """
        total_score = 1
        documentWordFrequencyDictionary = self.create_words_frequency_dict(
            list(document.split(" ")), collection = False)
        for word in querySentence:
            score_of_word = alpha * self.calculate_unigram_probability(word, documentWordFrequencyDictionary) \
                + (1 - alpha) * self.calculate_unigram_probability(word, self.wordsCollectionFrequencyDictionary)
            total_score *= score_of_word
        if normalize_probability:
            return total_score
        # Log-base-2 score when not normalizing.
        return math.log(total_score) / math.log(2)

    def getQueryLikelihoodModelScore(self, querySentence:list):
        """Add a QueryLikelihoodModelScore column scoring every tweet's
        clean_text against the parsed query."""
        querySentenceList = QueryParsers(querySentence).query
        self.tweets_data["QueryLikelihoodModelScore"] = self.tweets_data.apply(
            lambda row: self.calculate_interpolated_sentence_probability(querySentenceList, row.clean_text), axis = 1)
        return
| [
"chiayik_tan@mymail.sutd.edu.sg"
] | chiayik_tan@mymail.sutd.edu.sg |
d5531ea201513c2ec0e790cc511f138883c4d7e5 | f1447e5389d6372a6a84d51eda5520e1874f9604 | /CFG_Test.py | ae2716b5940177f06f25f4100fd867df882c1522 | [] | no_license | karimlasri/ivf_projet | afd32c78861ec3c7934bb1dd2236ce04332c5baf | 4f433a4bf4e0ba02035f4dea29675d90e485d509 | refs/heads/master | 2021-05-05T06:22:38.946363 | 2018-01-24T17:01:17 | 2018-01-24T17:01:17 | 118,795,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | import networkx as nx
import matplotlib.pyplot as plt
def create_graph():
    """Build a 7-node control-flow graph whose edges carry a guard lambda
    (`cond`) and an effect lambda (`cmd`) over a variable-environment dict;
    `cmd_type` marks assignment edges."""
    G = nx.DiGraph()
    G.add_node(1)
    G.add_nodes_from([2,3,4,5,6,7])
    # Branch on x <= 0 at node 1; both arms then transform x before node 4.
    G.add_edge(1, 2, cond = lambda dic : dic['x']<=0, cmd = lambda dic : None, cmd_type = None)#vars = ['x']
    G.add_edge(1, 3, cond = lambda dic : not(dic['x']<=0), cmd = lambda dic : None, cmd_type = None)
    G.add_edge(2, 4, cond = lambda dic : True, cmd = lambda dic : dic.update({'x':-dic['x']}), cmd_type = 'assign')
    G.add_edge(3, 4, cond = lambda dic : True, cmd = lambda dic : dic.update({'x':1-dic['x']}), cmd_type = 'assign')
    # Second branch on x == 1 at node 4; both arms set x to 1 before node 7.
    G.add_edge(4, 5, cond = lambda dic : dic['x']==1, cmd = lambda dic : None, cmd_type = None)
    G.add_edge(4, 6, cond = lambda dic : not(dic['x']==1), cmd = lambda dic : None, cmd_type = None)
    G.add_edge(5, 7, cond = lambda dic : True, cmd = lambda dic : dic.update({'x':1}), cmd_type = 'assign')
    G.add_edge(6, 7, cond = lambda dic : True, cmd = lambda dic : dic.update({'x':1}), cmd_type = 'assign')
    return G
def draw_graph(G):
    """Render the graph with a spring layout, node labels and edge labels."""
    layout = nx.spring_layout(G)
    nx.draw(G, with_labels=True, pos = layout)
    nx.draw_networkx_edge_labels(G, pos = layout)
    plt.show()
| [
"karim.lasri@student.ecp.fr"
] | karim.lasri@student.ecp.fr |
0c15c1f168b71d40114c30f3ba0a6be67f810679 | 5dfe3eac9c3d53033f7e417268381dabfb775d85 | /WeatherApp/weather/models.py | 828ae21da11660ce95e49714a52723c77be3396f | [] | no_license | DsArmor/Weather-app | 052a235b5ef0f21daf20454edb165cb59bb8e3c2 | 633ea82febcd431c1cca85737b3be016ddc07baa | refs/heads/master | 2021-04-05T20:12:37.026842 | 2020-03-19T20:56:34 | 2020-03-19T20:56:34 | 248,596,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from django.db import models
class City(models.Model):
    """A city whose weather is displayed by the app."""
    # Display name of the city.
    name = models.CharField(max_length=30)
    def __str__(self):
        # Human-readable representation (used e.g. by the Django admin).
        return self.name
# Create your models here.
| [
"zeleznoff84-00@mail.ru"
] | zeleznoff84-00@mail.ru |
3f8b5dd668abacd12a63074b3007d07aaf75034a | 83361122ff4ad02d6ae7d0115d0c2fbc2e9b7a72 | /freedom/conftest.py | e1ba1ca8fc949e4bde1f448ae76c5b6b42309d75 | [
"WTFPL"
] | permissive | smbdsbrain/wind_of_freedom | 43779c356b4de626cb8fbed61f8b66b1fa5684b9 | b1b19205175db8c824ab5f4b2e8a8e4e2c5d6873 | refs/heads/master | 2020-03-22T05:29:59.852211 | 2018-07-03T14:05:06 | 2018-07-03T14:05:06 | 139,570,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | from pathlib import Path
import aioworkers_aiohttp
import pytest
from aioworkers.core.config import Config
from aioworkers.core.context import Context
@pytest.fixture
def config():
    """Load the aioworkers-aiohttp plugin config merged with the app's
    config.yaml (located next to this conftest's parent directory)."""
    plugin_config = Path(aioworkers_aiohttp.__file__).with_name('plugin.ini')
    app_config = Path(__file__).parent.with_name('config.yaml')
    config = Config().load(plugin_config, app_config)
    return config
@pytest.fixture
def context(loop, config):
    """Yield a started aioworkers Context bound to the test event loop."""
    with Context(config, loop=loop) as ctx:
        yield ctx
@pytest.fixture
def app(context):
    """The aiohttp application created by the context."""
    return context.app
@pytest.fixture
def test_server(app):
    """Wrap the app in an aiohttp TestServer (imported lazily)."""
    from aiohttp.test_utils import TestServer
    return TestServer(app)
@pytest.fixture
def anonym_client(app, test_client, test_server):
    """An unauthenticated aiohttp test client connected to the test server."""
    client = app.loop.run_until_complete(test_client(test_server))
    return client
| [
"pk@napoleonit.ru"
] | pk@napoleonit.ru |
56a1b73197db6fe440897906fb20c5dd024ea599 | 361ad65df8de72b4f2b2dfc17df2987177adedc8 | /web/models.py | 0bd414f7ef18d88d9e9fb7c2f498aa39449ac08a | [] | no_license | uborzz/django_uborzz | 51571d28ef7c27e8f7a5f62716490ae34bf7b917 | 4b287cb1f0649016fa2434f050440a881f10bfdc | refs/heads/master | 2020-03-17T11:31:28.324839 | 2018-05-27T19:45:14 | 2018-05-27T19:45:14 | 133,555,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Board(models.Model):
    """A forum board grouping related topics."""
    # Unique board name shown in listings.
    name = models.CharField(max_length=30, unique=True)
    description = models.CharField(max_length=100)
    def __str__(self):
        return self.name
class Topic(models.Model):
    """A discussion thread inside a board."""
    subject = models.CharField(max_length=255)
    # Set once at creation time (auto_now_add), despite the name.
    last_updated = models.DateTimeField(auto_now_add=True)
    # Deleting the board deletes its topics.
    board = models.ForeignKey(Board, on_delete=models.CASCADE, related_name='topics')
    # Deleting the user replaces the reference with the literal "Usuario Borrado".
    starter = models.ForeignKey(User, on_delete=models.SET("Usuario Borrado"), related_name='topics')
class Post(models.Model):
    """A single message posted in a topic."""
    message = models.TextField(max_length=4000)
    # Deleting the topic deletes its posts.
    topic = models.ForeignKey(Topic, on_delete=models.CASCADE, related_name='posts')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(null=True)
    # Deleting the author replaces the reference with the literal "Usuario Borrado".
    created_by = models.ForeignKey(User, on_delete=models.SET("Usuario Borrado"), related_name='posts')
updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, related_name='+') | [
"uborzz@gmail.com"
] | uborzz@gmail.com |
479eabc4c27c4631d1beee3ab1cb8a2c9be9a668 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/services/types/geographic_view_service.py | 1c6a192bba2848acf2005e44cd07b66cdd125389 | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 1,237 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetGeographicViewRequest",},
)
# NOTE: generated proto-plus message; keep field numbers stable.
class GetGeographicViewRequest(proto.Message):
    r"""Request message for
    [GeographicViewService.GetGeographicView][google.ads.googleads.v8.services.GeographicViewService.GetGeographicView].
    Attributes:
        resource_name (str):
            Required. The resource name of the geographic
            view to fetch.
    """
    # Proto field 1: full resource name of the geographic view to fetch.
    resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | noreply@github.com |
768eef9b25ec968182d16d8bdb7c6042e49fe175 | 60bd978c8a74b284110748ba7fe60b7e99668485 | /LabConector/settings.py | a850207339d593f3b4e833d2ced65c2c3ca479bb | [] | no_license | jesusceron/phr-s | be0a22a22b35e17690d6425ae466280b1a6adaf0 | c32efae76eb7dcaea144cb801993b79971f5ef48 | refs/heads/master | 2021-01-22T10:18:32.294336 | 2014-06-11T22:58:12 | 2014-06-11T22:58:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | from django.conf import settings # top-level setttings
# Short identifier for this Indivo sub-app; used to build OAuth keys and paths.
SUBMODULE_NAME = 'autismscreening'
# OAuth credentials this app presents to the Indivo server.
INDIVO_SERVER_OAUTH = {
    'consumer_key': SUBMODULE_NAME+'@apps.indivo.org',
    'consumer_secret': SUBMODULE_NAME
}
# Server endpoints re-exported from the project-level settings.
INDIVO_SERVER_LOCATION = settings.INDIVO_SERVER_LOCATION
INDIVO_UI_SERVER_BASE = settings.INDIVO_UI_SERVER_BASE
# JMVC_HOME = settings.SERVER_ROOT_DIR + '/apps/'+SUBMODULE_NAME+'/jmvc/'
# JS_HOME = JMVC_HOME + SUBMODULE_NAME + '/'
# Filesystem/URL layout for this sub-app's templates and static assets.
APP_HOME = 'apps/'+SUBMODULE_NAME
TEMPLATE_PREFIX = SUBMODULE_NAME + '/templates'
STATIC_HOME = '/'+APP_HOME+'/static'
| [
"jdceronbravo@gmail.com"
] | jdceronbravo@gmail.com |
c563ebf7c8f48e07c6f75e980fe4f341bf47c19f | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20180703/example_resumable/05handler_cli.py | 391b49df85d96e1dc81fa2dd64d1562ecb57edaa | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 489 | py | import csv
import sys
def main(args):
    """Generate output rows for the parsed CLI arguments."""
    yield from run(args.input)


def run(itr):
    """Yield a CSV header row, then one {"x", "x*x"} dict per input value."""
    yield ["x", "x*x"]
    for raw in itr:
        value = int(raw)
        yield {"x": value, "x*x": value * value}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", action="append", default=["1", "2", "3", "4", "5"])
args = parser.parse_args()
itr = main(args)
w = csv.DictWriter(sys.stdout, fieldnames=next(itr))
w.writerows(itr)
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
cc2d8f6b8530ab281bd93e4bcf0a34338add65c2 | 61af5349f659cccb5cbb9c87e044929dc5f0fb27 | /solutions/2_Image_Filtering_Exercise3.py | e537ebf135975ffbc561d4a1a27b501c6eb995ce | [
"Apache-2.0"
] | permissive | vicory/CourseInBiomedicalImageAnalysisVisualizationAndArtificialIntelligence | 1540b56c0b30189ab593b68274fb8230b392bfb1 | ed848e8d7cb781019adb453660535f184f2a15d6 | refs/heads/master | 2020-08-05T04:38:26.230444 | 2019-10-03T18:00:11 | 2019-10-03T18:00:11 | 212,398,422 | 0 | 0 | Apache-2.0 | 2019-10-02T17:16:04 | 2019-10-02T17:16:04 | null | UTF-8 | Python | false | false | 198 | py | fileName = 'data/PacMan.png'
# Build an ITK pipeline: read the image, median-filter it (radius 5) to
# remove noise, then execute the pipeline with Update().
# NOTE(review): `fileName` (defined above) and `view` come from the
# surrounding notebook context -- presumably itkwidgets' viewer; confirm.
reader = itk.ImageFileReader.New(FileName=fileName)
smoother = itk.MedianImageFilter.New(Input=reader.GetOutput())
smoother.SetRadius(5)
smoother.Update()
view(smoother) | [
"francois.budin@kitware.com"
] | francois.budin@kitware.com |
64cb84b1c15f8e8cf1b9a829816d95a8aa175e17 | ce0593c27bc8a524824cc52156ea4b268d25cb32 | /testing/settings.py | aa4f550baedcdaa8d50da4093ea94513a1083356 | [
"Apache-2.0"
] | permissive | Edge-On-Demand/django-bootstrap | c07835b436770c27590009fc11e4d2a42592044e | 3c405765ae4e1d60d05fe213840d848aa314f256 | refs/heads/master | 2021-07-29T20:06:44.344983 | 2021-07-20T12:01:15 | 2021-07-20T12:01:15 | 140,842,917 | 0 | 1 | NOASSERTION | 2019-05-28T22:13:03 | 2018-07-13T12:12:26 | Python | UTF-8 | Python | false | false | 749 | py | import os
# Directory containing this settings module.
DIRNAME = os.path.dirname(__file__)
# Test database: a local SQLite file; the other fields are unused by sqlite3.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'database.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Minimal app set needed to run the bootstrap app's tests.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'bootstrap',
    'bootstrap.tests',
)
| [
"philipthrasher@gmail.com"
] | philipthrasher@gmail.com |
fb64273770c1e971babf4f6f9151936ac9fca6d8 | 422ed1044aa9b265831516f2b341dc803e89ac42 | /person.py | 790f071ccb1a8c423a8c3b9be043dac60e776723 | [] | no_license | sudarshansanjeev/Python | f9dfc4984c21e289d543b0af5027c60e8d4bd8be | 124c750460bd9eccec1e21f3f485b5bb5b4a3276 | refs/heads/master | 2020-05-07T13:35:22.385688 | 2019-04-10T10:19:43 | 2019-04-10T10:19:43 | 180,555,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | class Person:
def __init__(self,name,age):
self.name = name;
self.age = age;
def __str__(self):
print("Name : ",self.name);
print("Age : ",self.age);
return "";
| [
"noreply@github.com"
] | noreply@github.com |
d0b1bd9f29811eb6a88f19f8aa0b4560fdaf1152 | cbf297b1c62d014435b9faa5fa3c0ffbdc676574 | /review_summarization/TextUtils/textCleaner.py | 80128ab7c0c9c5fccda5440012c08a247ecdee88 | [] | no_license | mehdiomid/text-summarization | 666925636cc9564d64951ac8a211ade11f25c47c | becff4f92e8c57df1bd3b06834bfed30c3f7dba8 | refs/heads/master | 2020-09-07T20:44:22.104678 | 2019-11-17T22:04:28 | 2019-11-17T22:04:28 | 220,908,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 15:20:54 2019
@author: Mehdi
"""
from WordsMapping import words_mapping
import re
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
contraction_mapping = words_mapping.contraction_mapping
def text_cleaner(text):
    """Normalize review text for model input.

    Steps (order matters): lowercase, strip HTML tags via BeautifulSoup,
    drop "(...)" spans and double quotes, expand contractions using the
    module-level ``contraction_mapping``, remove possessive "'s", replace
    every non-ASCII-letter with a space, drop English stopwords, and keep
    only words of length >= 3.  Returns the cleaned, space-joined string.
    """
    newString = text.lower()
    newString = BeautifulSoup(newString, "lxml").text
    newString = re.sub(r'\([^)]*\)', '', newString)
    newString = re.sub('"','', newString)
    newString = ' '.join([contraction_mapping[t] if t in contraction_mapping else t for t in newString.split(" ")])
    newString = re.sub(r"'s\b","",newString)
    newString = re.sub("[^a-zA-Z]", " ", newString)
    # stop_words is the module-level NLTK English stopword set.
    tokens = [w for w in newString.split() if not w in stop_words]
    long_words=[]
    for i in tokens:
        if len(i)>=3: #removing short word
            long_words.append(i)
    return (" ".join(long_words)).strip()
def summary_cleaner(text):
    """Normalize a summary string.

    Drops double quotes, expands contractions via the module-level
    ``contraction_mapping``, removes possessive "'s", replaces non-letters
    with spaces, lowercases, and keeps only tokens longer than one
    character.  The result has a trailing space after each kept token
    (matching the original implementation).
    """
    cleaned = re.sub('"', '', text)
    cleaned = ' '.join(contraction_mapping.get(t, t) for t in cleaned.split(" "))
    cleaned = re.sub(r"'s\b", "", cleaned)
    cleaned = re.sub("[^a-zA-Z]", " ", cleaned)
    cleaned = cleaned.lower()
    # Each surviving token is emitted followed by a single space.
    return ''.join(tok + ' ' for tok in cleaned.split() if len(tok) > 1)
| [
"omidghane@gmail.com"
] | omidghane@gmail.com |
50cfb7d3916e5d4762fc7153ca466e612fa8a84e | f60aa70934ef8ebf442c15db2a83b45cdba84671 | /Megalodon/src/vision/apriltag/frame_grab.py | 68f26403b8f0f70f409f84cb6a4f7f9b92156697 | [
"MIT"
] | permissive | tedklin/Echo | 6bda7febadd661a45568ca315c3606dcee67391a | e9937307d02e36a5a111c68f9dbf069baa76bf98 | refs/heads/master | 2020-04-07T00:27:34.757907 | 2019-09-09T06:18:05 | 2019-09-09T06:18:05 | 157,904,716 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import cv2
# Preview frames from camera index 1; SPACE saves a snapshot, ESC quits.
cam = cv2.VideoCapture(1)
cv2.namedWindow("test")
img_counter = 0

while True:
    ret, frame = cam.read()
    # Fix: check the read result *before* showing the frame --
    # cv2.imshow would fail on the None frame returned by a failed read.
    if not ret:
        break
    cv2.imshow("test", frame)

    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed: save the current frame to a numbered PNG.
        img_name = "opencv_frame_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1

cam.release()
cv2.destroyAllWindows() | [
"tedklin@gmail.com"
] | tedklin@gmail.com |
e771087dda9f75a0335919a1fb638e8c0f758ab6 | 8fd07ea363ba4263bafe25d213c72cc9a93e2b3e | /nsd2018-master/nsd1804/python/day05/u2d.py | 5bab4f12fb972e1d039dbb0c1b0ab2b1eb7c6dc5 | [] | no_license | ml758392/python_tedu | 82e12ae014f0fc81230386fab07f901510fc8837 | 9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7 | refs/heads/master | 2020-04-12T08:30:42.354663 | 2019-03-29T11:55:30 | 2019-03-29T11:55:30 | 162,386,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import sys
def unix2dos(fname):
    """Copy *fname* to ``fname + '.txt'`` with CRLF line endings.

    Each line's trailing whitespace is stripped (str.rstrip) before the
    CR-LF terminator is appended.
    """
    target = fname + '.txt'
    with open(fname) as src, open(target, 'w') as dst:
        for line in src:
            dst.write(line.rstrip() + '\r\n')
# CLI entry point: convert the file named on the command line.
if __name__ == '__main__':
    unix2dos(sys.argv[1])
| [
"yy.tedu.cn"
] | yy.tedu.cn |
db6165c962d1eebdccaf2e282b16955262c2ef9a | 2ab707ef94cfc3ef366f1ae7554810c38cc07c19 | /Blog/urls.py | 01742612a3647523a383c3b54188724639c43c47 | [] | no_license | ZhangJianyu1918/my_Blog | 921bef1e5e887658a91b113cb8f4c5c05d65c0b2 | ef78769a271b9eb35618ee2e4fa4dfbfe3497bab | refs/heads/master | 2021-01-04T09:32:54.484264 | 2020-02-15T08:43:35 | 2020-02-15T08:44:53 | 240,489,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | """Blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# URL routing: Django admin plus the two project apps.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('Blogs.urls')),        # Blogs app served at the site root
    path('Users/',include('Users.urls')),  # user/account pages under /Users/
]
| [
"zhangjianyu1918@qq.com"
] | zhangjianyu1918@qq.com |
1ee728ba1a2511d5855cc700cfbd84756561b369 | bdb5afdeb7927081cc01a541131f73f9057cfe65 | /mgn_model/reid/models/resnet_reid.py | 62f991d4a22266cdd9b6961d6f293c3ae07a8ebe | [] | no_license | intjun/AIC2020_ReID | 3c1528c8718c998a79fc40742859ad9de102140f | 674ff70fa62930bdea3a700d6fa8a40c9096202e | refs/heads/master | 2022-06-23T21:04:58.218919 | 2020-05-12T02:29:01 | 2020-05-12T02:29:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,156 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import math
import pdb
__all__ = ['ResNet_reid_50','ResNet_reid_101']
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-layer residual block: (conv3x3 -> BN -> ReLU) x2 + shortcut."""

    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut around the two conv layers.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce, 3x3 (optionally strided),
    1x1 expand to ``planes * 4``, plus shortcut."""

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut around the three conv layers.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return self.relu(y)
class ResNetBase(nn.Module):
    """ResNet feature extractor: stem + four residual stages, no head.

    Note: ``layer4`` is built with the default stride of 1 (instead of the
    usual 2), keeping a larger final feature map -- a common re-ID trick.
    ``num_classes`` is accepted but unused here; classification happens in
    the wrapping model.
    """
    def __init__(self, block, layers, num_classes):
        # Channel count feeding the next stage; updated by _make_layer.
        self.inplanes = 64
        super(ResNetBase, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3])
        # He-style init for convs; BN starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Stack `blocks` residual blocks; only the first may downsample.
        # A projection shortcut is needed when the spatial size or channel
        # count changes across the stage boundary.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Four residual stages.
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
class ResNet_reid(nn.Module):
    """Re-ID model wrapping :class:`ResNetBase` with side outputs.

    ``forward`` returns a 6-tuple
    ``(logits, embedding, l2_side, l3_side, l4_side, embedding)`` where the
    side outputs are L2-normalized per-location activation-energy maps of
    shape (batch, 1, H, W) taken after ``layer2`` (2x max-pooled),
    ``layer3`` and ``layer4``.

    Args:
        block: residual block class (e.g. ``Bottleneck``).
        layers: number of blocks in each of the four stages.
        num_classes: size of the identity classification layer.
        num_features: embedding dimension after the 1x1 reduction conv.
        norm: if True, L2-normalize the returned embedding.
    """

    def __init__(self, block, layers, num_classes, num_features, norm=True):
        super(ResNet_reid, self).__init__()
        self.base = ResNetBase(block, layers, num_classes)
        self.num_classes = num_classes
        self.num_features = num_features
        self.norm = norm
        self.dropout = nn.Dropout()
        self.layer2_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        # 1x1 conv reducing the backbone output to the embedding dimension.
        self.dim_red_conv = nn.Conv2d(512 * block.expansion, self.num_features, 1, bias=False)
        # Fix: use the non-deprecated in-place initializer kaiming_normal_
        # (nn.init.kaiming_normal is a deprecated alias).
        nn.init.kaiming_normal_(self.dim_red_conv.weight.data, mode='fan_out')
        self.dim_red_bn = nn.BatchNorm2d(self.num_features)
        self.dim_red_bn.weight.data.fill_(1)
        self.dim_red_bn.bias.data.zero_()
        # Bias-free classifier over the (un-normalized) embedding.
        self.fc = nn.Linear(self.num_features, self.num_classes, False)
        nn.init.normal_(self.fc.weight, std=0.001)

    def forward(self, x):
        # Run the backbone's submodules manually so intermediate stage
        # outputs can be captured for the side maps.  ResNetBase defines no
        # 'avgpool' module, so the break is inert here; it is kept for
        # backbones that do include one.
        side_output = {}
        for name, module in self.base._modules.items():
            if name == 'avgpool':
                break
            x = module(x)
            if name in ('layer2', 'layer3', 'layer4'):
                side_output[name] = x

        # Each side map: channel-wise mean of squared activations,
        # L2-normalized over all spatial positions -> (batch, 1, H, W).
        l2_maxp = self.layer2_maxpool(side_output['layer2'])
        l2_side = F.normalize(l2_maxp.pow(2).mean(1).view(l2_maxp.size(0), -1)).view(
            l2_maxp.size(0), 1, l2_maxp.size(2), l2_maxp.size(3))
        l3_maxp = side_output['layer3']
        l3_side = F.normalize(l3_maxp.pow(2).mean(1).view(l3_maxp.size(0), -1)).view(
            l3_maxp.size(0), 1, l3_maxp.size(2), l3_maxp.size(3))
        l4_maxp = side_output['layer4']
        l4_side = F.normalize(l4_maxp.pow(2).mean(1).view(l4_maxp.size(0), -1)).view(
            l4_maxp.size(0), 1, l4_maxp.size(2), l4_maxp.size(3))

        # Head: global average pool -> dropout -> 1x1 reduction -> BN.
        x = nn.functional.avg_pool2d(x, x.size()[2:])
        x = self.dropout(x)
        x = self.dim_red_conv(x)
        x = self.dim_red_bn(x)
        x = x.view(x.size(0), -1)
        x_g = x
        x = self.fc(x)
        if self.norm:
            x_g = F.normalize(x_g)
        return x, x_g, l2_side, l3_side, l4_side, x_g
def ResNet_reid_50(pretrained=False, **kwargs):
    """Build a ResNet-50 re-id model (``pretrained`` is accepted but unused)."""
    return ResNet_reid(Bottleneck, [3, 4, 6, 3], **kwargs)
def ResNet_reid_101(pretrained=False, **kwargs):
    """Build a ResNet-101 re-id model (``pretrained`` is accepted but unused)."""
    return ResNet_reid(Bottleneck, [3, 4, 23, 3], **kwargs)
def ResNet_reid_152(pretrained=False, **kwargs):
    # ResNet-152 variant: stage depths [3, 8, 36, 3]; `pretrained` is unused.
    model = ResNet_reid(Bottleneck, [3, 8, 36, 3], **kwargs)
return model | [
"cunyuangao@tencent.com"
] | cunyuangao@tencent.com |
6db4be0988c7548d0f320f8e6b8663566e739aed | 93db886848da0d584e022da861f8e4065978bf69 | /americancultures/lib/python3.7/site-packages/oauthlib/oauth1/rfc5849/signature.py | c96fb88dfadbc8cce9f47c5ab54785f0ecb82515 | [] | no_license | jiyoojeong/code_examples_Jan2020 | 91096d7b5b8ac97b49ddfd348f9b75422bec14c8 | 4f0331f87b595b66a0c17db8e8fb2c0c99eff60e | refs/heads/master | 2020-12-27T09:36:53.836823 | 2020-02-03T00:13:46 | 2020-02-03T00:13:46 | 237,853,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,246 | py | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of `section 3.4`_ of the spec.
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri sort.py, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
"""
from __future__ import absolute_import, unicode_literals
import binascii
import hashlib
import hmac
import logging
from oauthlib.common import (extract_params, safe_string_equals,
unicode_type, urldecode)
from . import utils
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
log = logging.getLogger(__name__)
def signature_base_string(http_method, base_str_uri,
                          normalized_encoded_request_parameters):
    """**Construct the signature base string.**

    Per `section 3.4.1.1`_ of RFC 5849, the base string is the
    concatenation of three percent-escaped components, joined by "&":

    1. the HTTP request method, uppercased (custom methods are escaped
       per section 3.6),
    2. the base string URI (section 3.4.1.2),
    3. the normalized request parameters (section 3.4.1.3.2).

    .. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
    """
    return '&'.join((
        utils.escape(http_method.upper()),
        utils.escape(base_str_uri),
        utils.escape(normalized_encoded_request_parameters),
    ))
def base_string_uri(uri, host=None):
    """Return the "base string URI" (`section 3.4.1.2`_ of RFC 5849).

    The scheme and authority are lowercased, an empty path becomes "/",
    default ports (80 for http, 443 for https) are dropped, and the query
    and fragment are discarded.  If *host* is given it overrides the
    authority (netloc) taken from *uri* -- it should match the HTTP
    "Host" header of the request.

    .. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
    """
    if not isinstance(uri, unicode_type):
        raise ValueError('uri must be a unicode object.')

    # FIXME: urlparse does not support unicode
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)

    # A request resource URI must carry both a scheme and an authority
    # (RFC 3986); without them no base string URI can be formed.
    if not scheme or not netloc:
        raise ValueError('uri must include a scheme and netloc')

    # RFC 2616 section 5.1.2: the absolute path cannot be empty; if absent
    # it MUST be given as "/" (the server root).
    if not path:
        path = '/'

    # 1. The scheme and host MUST be lowercase.
    scheme = scheme.lower()
    netloc = netloc.lower()

    # 2. Host and port must match the HTTP "Host" header when supplied.
    if host is not None:
        netloc = host.lower()

    # 3. The port MUST be excluded when it is the default for the scheme
    # (80 for http per RFC 2616, 443 for https per RFC 2818) and included
    # otherwise.
    default_ports = (
        ('http', '80'),
        ('https', '443'),
    )
    if ':' in netloc:
        host, port = netloc.split(':', 1)
        if (scheme, port) in default_ports:
            netloc = host

    v = urlparse.urlunparse((scheme, netloc, path, params, '', ''))

    # RFC 5849 does not say which characters are percent-encoded in the
    # base string URI.  Following the example in section 3.4.1.2, this
    # implementation assumes that *only* spaces are encoded (in the whole
    # value, not just the path).  The value is escaped again when the
    # signature base string is assembled, so a space ends up as "%2520".
    return v.replace(' ', '%20')
# ** Request Parameters **
#
# Per `section 3.4.1.3`_ of the spec.
#
# In order to guarantee a consistent and reproducible representation of
# the request parameters, the parameters are collected and decoded to
# their original decoded form. They are then sorted and encoded in a
# particular manner that is often different from their original
# encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3
def collect_parameters(uri_query='', body=[], headers=None,
                       exclude_oauth_signature=True, with_realm=False):
    """**Parameter Sources** (`section 3.4.1.3.1`_ of RFC 5849).

    Collect the request parameters, fully decoded, as a single list of
    (name, value) pairs gathered from:

    * the URI query string, parsed as "application/x-www-form-urlencoded"
      (W3C.REC-html40-19980424, section 17.13.4);
    * the OAuth "Authorization" header, excluding the "realm" parameter
      unless *with_realm* is True;
    * the request entity body, supplied as a dict, a list of 2-tuples, or
      a form-encoded query string.

    Values of parameters starting with ``oauth_`` are unescaped, and the
    ``oauth_signature`` parameter is dropped unless
    *exclude_oauth_signature* is False.

    .. _`section 3.4.1.3.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
    """
    headers = headers or {}
    params = []

    # Query string parameters, decoded like a form-encoded body.
    if uri_query:
        params.extend(urldecode(uri_query))

    # OAuth Authorization header parameters (header names are
    # case-insensitive; "realm" is excluded by default).
    if headers:
        headers_lower = dict((k.lower(), v) for k, v in headers.items())
        authorization_header = headers_lower.get('authorization')
        if authorization_header is not None:
            params.extend(
                i for i in utils.parse_authorization_header(authorization_header)
                if with_realm or i[0] != 'realm')

    # Single-part, form-encoded entity body parameters.
    # TODO: enforce header param inclusion conditions
    params.extend(extract_params(body) or [])

    # Protocol ("oauth_") parameter values arrive escaped; undo that.
    unescaped_params = [
        (k, utils.unescape(v) if k.startswith('oauth_') else v)
        for k, v in params]

    # The "oauth_signature" parameter MUST be excluded from the signature
    # base string if present.
    if exclude_oauth_signature:
        unescaped_params = [i for i in unescaped_params
                            if i[0] != 'oauth_signature']

    return unescaped_params
def normalize_parameters(params):
    """**Parameters Normalization** (`section 3.4.1.3.2`_ of RFC 5849).

    Every name and value is percent-escaped (section 3.6), the pairs are
    sorted by name with ties broken by value (ascending byte order), each
    pair is joined with "=" (even when the value is empty), and the pairs
    are concatenated with "&".

    .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    """
    # Escape first, then sort: sorting the escaped (name, value) tuples
    # reproduces the byte-value ordering the spec requires.
    escaped = sorted((utils.escape(k), utils.escape(v)) for k, v in params)

    # "name=value" pairs joined by "&"; empty values still carry the "=".
    return '&'.join('{0}={1}'.format(k, v) for k, v in escaped)
def sign_hmac_sha1_with_client(base_string, client):
    """Sign *base_string* using the client's HMAC-SHA1 credentials."""
    return sign_hmac_sha1(
        base_string, client.client_secret, client.resource_owner_secret)
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
    """**HMAC-SHA1** (`section 3.4.2`_ of RFC 5849).

    The HMAC key is ``escape(client_secret) & escape(token_secret)`` --
    the "&" is present even when either secret is empty -- and the text is
    the signature base string.  The digest is returned base64-encoded
    (RFC 2045 section 6.8).

    .. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
    """
    # Key: escaped client secret, "&", escaped resource-owner secret.
    key = '&'.join((utils.escape(client_secret or ''),
                    utils.escape(resource_owner_secret or '')))

    # hmac operates on bytes, not text.
    mac = hmac.new(key.encode('utf-8'), base_string.encode('utf-8'),
                   hashlib.sha1)

    # Base64-encode the digest and drop the trailing newline that
    # b2a_base64 appends.
    return binascii.b2a_base64(mac.digest())[:-1].decode('utf-8')
def sign_hmac_sha256_with_client(base_string, client):
    """Sign *base_string* using the client's HMAC-SHA256 credentials."""
    return sign_hmac_sha256(
        base_string, client.client_secret, client.resource_owner_secret)
def sign_hmac_sha256(base_string, client_secret, resource_owner_secret):
    """**HMAC-SHA256** (`section 3.4.2`_ of RFC 5849, algorithm per RFC 4634).

    The HMAC key is ``escape(client_secret) & escape(token_secret)`` --
    the "&" is present even when either secret is empty -- and the text is
    the signature base string.  The digest is returned base64-encoded
    (RFC 2045 section 6.8).

    .. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
    """
    # Key: escaped client secret, "&", escaped resource-owner secret.
    key = '&'.join((utils.escape(client_secret or ''),
                    utils.escape(resource_owner_secret or '')))

    # hmac operates on bytes, not text.
    mac = hmac.new(key.encode('utf-8'), base_string.encode('utf-8'),
                   hashlib.sha256)

    # Base64-encode the digest and drop the trailing newline that
    # b2a_base64 appends.
    return binascii.b2a_base64(mac.digest())[:-1].decode('utf-8')
# Cached PyJWT RSA/SHA-1 algorithm object; created on first use.
_jwtrs1 = None
#jwt has some nice pycrypto/cryptography abstractions
def _jwt_rs1_signing_algorithm():
    # Lazily build the RSA-SHA1 algorithm and memoize it in the module
    # global, so `jwt` is only imported when RSA signing is actually used.
    global _jwtrs1
    if _jwtrs1 is None:
        import jwt.algorithms as jwtalgo
        _jwtrs1 = jwtalgo.RSAAlgorithm(jwtalgo.hashes.SHA1)
    return _jwtrs1
def sign_rsa_sha1(base_string, rsa_private_key):
    """**RSA-SHA1**

    Per `section 3.4.3`_ of the spec.

    The "RSA-SHA1" signature method uses the RSASSA-PKCS1-v1_5 signature
    algorithm as defined in `RFC3447, Section 8.2`_ (also known as
    PKCS#1), using SHA-1 as the hash function for EMSA-PKCS1-v1_5.  To
    use this method, the client MUST have established client credentials
    with the server that included its RSA public key (in a manner that is
    beyond the scope of this specification).

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
    .. _`RFC3447, Section 8.2`: https://tools.ietf.org/html/rfc3447#section-8.2
    """
    # Text must be bytes before signing; unicode_type covers py2/py3 text.
    if isinstance(base_string, unicode_type):
        base_string = base_string.encode('utf-8')
    # TODO: finish RSA documentation
    # Sign with the shared, lazily-created jwt-backed RSA/SHA-1 algorithm.
    alg = _jwt_rs1_signing_algorithm()
    key = _prepare_key_plus(alg, rsa_private_key)
    s=alg.sign(base_string, key)
    # base64-encode the raw signature and strip b2a_base64's trailing newline.
    return binascii.b2a_base64(s)[:-1].decode('utf-8')
def sign_rsa_sha1_with_client(base_string, client):
    """Sign *base_string* with RSA-SHA1 using the client's RSA key.

    Raises ValueError when the client has no ``rsa_key`` configured.
    """
    if client.rsa_key:
        return sign_rsa_sha1(base_string, client.rsa_key)
    raise ValueError('rsa_key is required when using RSA signature method.')
def sign_plaintext(client_secret, resource_owner_secret):
    """Sign a request using plaintext.

    Per `section 3.4.4`_ of the spec.

    The "PLAINTEXT" method does not employ a signature algorithm.  It
    MUST be used with a transport-layer mechanism such as TLS or SSL (or
    sent over a secure channel with equivalent protections).  It does not
    utilize the signature base string or the "oauth_timestamp" and
    "oauth_nonce" parameters.

    .. _`section 3.4.4`: https://tools.ietf.org/html/rfc5849#section-3.4.4
    """
    # oauth_signature = escaped client secret, "&" (mandatory even when a
    # secret is empty), escaped resource-owner secret -- Section 3.6 encoding.
    return '&'.join((utils.escape(client_secret or ''),
                     utils.escape(resource_owner_secret or '')))
def sign_plaintext_with_client(base_string, client):
    """Sign using PLAINTEXT with secrets taken from *client*.

    ``base_string`` is unused by PLAINTEXT but kept for API symmetry
    with the other ``sign_*_with_client`` helpers.
    """
    return sign_plaintext(client.client_secret,
                          client.resource_owner_secret)
def verify_hmac_sha1(request, client_secret=None,
                     resource_owner_secret=None):
    """Verify a HMAC-SHA1 signature.
    Per `section 3.4`_ of the spec.
    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.
    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    # Rebuild the signature base string from the request itself ...
    norm_params = normalize_parameters(request.params)
    bs_uri = base_string_uri(request.uri)
    sig_base_str = signature_base_string(request.http_method, bs_uri,
                                         norm_params)
    # ... re-sign it with the supplied secrets ...
    signature = sign_hmac_sha1(sig_base_str, client_secret,
                               resource_owner_secret)
    # ... and compare in constant time to avoid timing side channels.
    match = safe_string_equals(signature, request.signature)
    if not match:
        log.debug('Verify HMAC-SHA1 failed: signature base string: %s',
                  sig_base_str)
    return match
def verify_hmac_sha256(request, client_secret=None,
                       resource_owner_secret=None):
    """Verify a HMAC-SHA256 signature.
    Per `section 3.4`_ of the spec.
    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.
    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    # Rebuild the signature base string from the request itself ...
    norm_params = normalize_parameters(request.params)
    bs_uri = base_string_uri(request.uri)
    sig_base_str = signature_base_string(request.http_method, bs_uri,
                                         norm_params)
    # ... re-sign it with the supplied secrets ...
    signature = sign_hmac_sha256(sig_base_str, client_secret,
                                 resource_owner_secret)
    # ... and compare in constant time to avoid timing side channels.
    match = safe_string_equals(signature, request.signature)
    if not match:
        log.debug('Verify HMAC-SHA256 failed: signature base string: %s',
                  sig_base_str)
    return match
def _prepare_key_plus(alg, keystr):
if isinstance(keystr, bytes):
keystr = keystr.decode('utf-8')
return alg.prepare_key(keystr)
def verify_rsa_sha1(request, rsa_public_key):
    """Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.
    Per `section 3.4.3`_ of the spec.
    Note this method requires the jwt and cryptography libraries.
    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.
    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    # Rebuild the signature base string (bytes) from the request.
    norm_params = normalize_parameters(request.params)
    bs_uri = base_string_uri(request.uri)
    sig_base_str = signature_base_string(request.http_method, bs_uri,
                                         norm_params).encode('utf-8')
    # The transmitted oauth_signature is base64; decode to raw bytes.
    sig = binascii.a2b_base64(request.signature.encode('utf-8'))
    # Verify against the public key with the shared RSA/SHA-1 algorithm.
    alg = _jwt_rs1_signing_algorithm()
    key = _prepare_key_plus(alg, rsa_public_key)
    verify_ok = alg.verify(sig_base_str, key, sig)
    if not verify_ok:
        log.debug('Verify RSA-SHA1 failed: signature base string: %s',
                  sig_base_str)
    return verify_ok
def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
    """Verify a PLAINTEXT signature.
    Per `section 3.4`_ of the spec.
    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
    """
    # Recompute the expected signature and compare in constant time.
    expected = sign_plaintext(client_secret, resource_owner_secret)
    ok = safe_string_equals(expected, request.signature)
    if not ok:
        log.debug('Verify PLAINTEXT failed')
    return ok
| [
"jiyooj@gmail.com"
] | jiyooj@gmail.com |
75aa760a5335cac72dbbcde939f818d0c5ecf3ac | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_366/ch27_2019_03_05_20_56_38_513299.py | 0e3a0ae523a08345d0ba9fc83d035fa90b50cc99 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | qtde_cigarros = int(input('Quantos cigarros você fuma por dia?'))
qtde_anos = float(input('Há quantos anos você fuma?'))
def tempo_perdido(qtde_cigarros, qtde_anos):
y = qtde_cigarros*365*qtde_anos/144
return y
c = tempo_perdido(qtde_cigarros, qtde_anos)
print(c) | [
"you@example.com"
] | you@example.com |
95af48986e6360c3a10016b67e4c6662719e3910 | 90f223fecb93da48cccaeec51b99b23e77a05b43 | /bases_2021_1S/Grupo 06/server/Fase1/storage/storageManager/storage.py | 7f9e5dc92a119e320568e8dc9ba7ab280c63c9c9 | [
"MIT"
] | permissive | ByLy23/tytus | 646c08a939b88acdf26a261837f63da1b65ccb4a | 93a3e1127d399554fd8a3f6f5f287aa7ac23efc2 | refs/heads/main | 2023-05-09T03:30:37.708123 | 2021-06-02T02:06:24 | 2021-06-02T02:06:24 | 322,924,013 | 0 | 0 | MIT | 2021-06-02T02:06:24 | 2020-12-19T19:35:58 | Python | UTF-8 | Python | false | false | 78,090 | py | # -------------------------------
# Released under MIT License
# Copyright (c) 2020 TytusDb Team 18
from Fase1.storage.storageManager.avl import avlMode as avl
# from avl import avlMode as avl
# fro
from Fase1.storage.storageManager.b import BMode as b
# from b import BMode as b
from Fase1.storage.storageManager.bplus import BPlusMode as bplus
from Fase1.storage.storageManager.dict import DictMode as dict
from Fase1.storage.storageManager.hash import HashMode as hash
from Fase1.storage.storageManager.isam import ISAMMode as isam
from Fase1.storage.storageManager.json1 import jsonMode as json
from Fase1.storage.storageManager import Serializable as Serializable
from Fase1.storage.storageManager import blockchain as block
from Fase1.storage.storageManager import Criptografia as crypt
import hashlib
import shutil
import os
import re
#----------------Data--------------------#
def checkData():
    """Ensure the on-disk layout under ./Data exists, creating anything
    missing: the catalog files, the security folder and the per-engine
    state for the hash and B-tree modes."""
    if not os.path.isdir("./Data"):
        os.mkdir("./Data")
    if not os.path.isfile("./Data/Data.bin"):
        # First run: persist empty catalogs for databases, tables and
        # the internal FK/index reference tables.
        dataBaseTree = {}
        Serializable.update('./Data', 'Data', dataBaseTree)
        Serializable.update('./Data', 'DataTables', dataBaseTree)
        Serializable.update('./Data', 'DataTablesRef', dataBaseTree)
    if not os.path.isdir("./Data/security"):
        os.mkdir("./Data/security")
    if not os.path.isdir("./Data/hash"):
        # Reset the hash engine's in-memory storage.
        hash.__init__()
        hash._storage = hash.ListaBaseDatos.ListaBaseDatos()
    if not os.path.isdir("./Data/B"):
        os.mkdir("./Data/B")
        b.b = b.db.DB()
def validateIdentifier(identifier):
    """Return a truthy value iff *identifier* is a valid object name.

    A valid identifier starts with a letter and continues with letters,
    digits or any of the characters ``#``, ``@``, ``$``, ``_``.
    """
    try:
        # BUGFIX: re.search only anchored the prefix, so names such as
        # "ab-c" were accepted; fullmatch requires the whole string to match.
        return re.fullmatch(r"[a-zA-Z][a-zA-Z0-9#@$_]*", identifier) is not None
    except TypeError:
        # Non-string input (e.g. None) is not a valid identifier.
        return False
def dropAll():
    """Wipe every database across all storage engines and reset the
    in-memory state of the hash and B-tree modes."""
    dict.dropAll()
    # Reinitialize the hash engine's storage from scratch.
    hash.__init__()
    hash._storage = hash.ListaBaseDatos.ListaBaseDatos()
    # Fresh B-tree engine instance.
    b.b = b.db.DB()
#----------------DataBase----------------#
def createDatabase(database: str, mode: str, encoding = 'utf8') -> int:
    """Create *database* in the given storage *mode*.

    Returns 0 on success, 1 for an invalid name, 2 if it already
    exists, 3 for an unknown mode, 4 for an unsupported encoding.
    """
    checkData()
    if not validateIdentifier(database):
        return 1
    catalog = Serializable.Read('./Data/', "Data")
    if encoding not in ('ascii', 'iso-8859-1', 'utf8'):
        return 4
    engines = {'avl': avl, 'b': b, 'bplus': bplus, 'dict': dict,
               'isam': isam, 'json': json, 'hash': hash}
    engine = engines.get(mode)
    if engine is None:
        return 3
    if catalog.get(database.upper()):
        return 2
    result = engine.createDatabase(database)
    if not result:
        # Catalog entry: [real name, [modes], encoding, compression level]
        # (-2 means "not compressed").
        catalog[database.upper()] = [database, [mode], encoding, -2]
        Serializable.update('./Data', 'Data', catalog)
    return result
def showDatabases():
    """Return ``[names, [[name, primary_mode], ...]]`` for every database
    registered in the catalog."""
    checkData()
    catalog = Serializable.Read('./Data/', "Data")
    names = [entry[0] for entry in catalog.values()]
    # entry[1][0] is the database's primary storage mode.
    pairs = [[entry[0], entry[1][0]] for entry in catalog.values()]
    return [names, pairs]
def alterDatabase(databaseOld, databaseNew) -> int:
    """Rename database *databaseOld* to *databaseNew*.

    Returns 0 on success, 1 on error, 2 if the old database does not
    exist, 3 if the new name is already taken.
    """
    checkData()
    try:
        if not validateIdentifier(databaseNew):
            return 1
        data = Serializable.Read('./Data/', "Data")
        db = data.get(databaseOld.upper())
        if not db:
            return 2
        if data.get(databaseNew.upper()):
            return 3
        databaseOld = db[0]  # real (case-preserving) name from the catalog
        tablas = []
        # Rename inside every engine this database has ever used and
        # collect the tables now living under the new name.
        engines = {'avl': avl, 'b': b, 'bplus': bplus, 'dict': dict,
                   'isam': isam, 'json': json, 'hash': hash}
        for name, engine in engines.items():
            if name in db[1]:
                res = engine.alterDatabase(databaseOld, databaseNew)
                tablas += engine.showTables(databaseNew)
        if not res:
            # Re-key the database catalog entry.
            del data[databaseOld.upper()]
            db[0] = databaseNew
            data[databaseNew.upper()] = db
            Serializable.update('./Data', 'Data', data)
            if len(tablas):
                dataTable = Serializable.Read('./Data/', "DataTables")
                dataTableRef = Serializable.Read('./Data/', "DataTablesRef")
                for x in tablas:
                    tab = dataTable.get(databaseOld.upper() + "_" + x.upper())
                    if tab:
                        # BUGFIX: the original overwrote tab[0] (the table
                        # name) with the new database name; a database
                        # rename must not change table names.
                        dataTable[databaseNew.upper() + "_" + x.upper()] = tab
                        del dataTable[databaseOld.upper() + "_" + x.upper()]
                    else:
                        # Internal FK/index helper tables live in DataTablesRef.
                        dataTableRef[x.upper() + "_" + databaseNew.upper()] = \
                            dataTableRef.get(x.upper() + "_" + databaseOld.upper())
                        del dataTableRef[x.upper() + "_" + databaseOld.upper()]
                    if os.path.isfile("./Data/security/" + databaseOld + "_" + x + ".json"):
                        os.rename("./Data/security/" + databaseOld + "_" + x + ".json",
                                  "./Data/security/" + databaseNew + "_" + x + ".json")
                # BUGFIX: only persist the table catalogs when they were
                # actually loaded (the original referenced them even when
                # no tables existed, raising NameError -> spurious code 1).
                Serializable.update('./Data', 'DataTables', dataTable)
                Serializable.update('./Data', 'DataTablesRef', dataTableRef)
        return res
    except Exception:
        return 1
def dropDatabase(database: str) -> int:
    """Drop *database* from every engine it has ever used.

    Returns 0 on success, 1 on error, 2 if the database does not exist.
    Only the primary mode's return value decides success; secondary
    modes are dropped best-effort.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            mode =db[1][0]
            database = db[0]
            if 'avl' in db[1]:
                if mode == 'avl':
                    res = avl.dropDatabase(database)
                else:
                    avl.dropDatabase(database)
            if 'b' in db[1]:
                if mode == 'b':
                    res = b.dropDatabase(database)
                else:
                    b.dropDatabase(database)
            if 'bplus' in db[1]:
                if mode == 'bplus':
                    res = bplus.dropDatabase(database)
                else:
                    bplus.dropDatabase(database)
            if 'dict' in db[1]:
                if mode == 'dict':
                    res = dict.dropDatabase(database)
                else:
                    dict.dropDatabase(database)
            if 'isam' in db[1]:
                if mode == 'isam':
                    res = isam.dropDatabase(database)
                else:
                    isam.dropDatabase(database)
            if 'json' in db[1]:
                if mode == 'json':
                    res = json.dropDatabase(database)
                else:
                    json.dropDatabase(database)
            if 'hash' in db[1]:
                if mode == 'hash':
                    res = hash.dropDatabase(database)
                else:
                    hash.dropDatabase(database)
            if not res:
                # Remove the catalog entry once the engines have dropped it.
                del data[database.upper()]
                Serializable.update('./Data', 'Data', data)
            return res
        else:
            return 2
    except:
        return 1
def alterDatabaseMode(database: str, mode: str) -> int:
    """Migrate *database* from its current primary mode to *mode*.

    All tables are copied into the new engine (via cambioTablas) and
    the old engine's copy is dropped. Returns 0 ok, 1 error (or mode
    unchanged), 2 no such database, 4 unknown mode.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if mode not in ['avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash']:
            return 4
        if db:
            tablas = []
            database = db[0]
            mod =db[1][0]
            # Migrating to the mode already in use is rejected.
            if mod== mode:
                return 1
            if mod == 'avl':
                tablas = avl.showTables(database)
                res = cambioTablas(avl, tablas, database, mode, db)
                if not res:
                    avl.dropDatabase(database)
            elif mod == 'b':
                tablas = b.showTables(database)
                res = cambioTablas(b, tablas, database, mode, db)
                if not res:
                    b.dropDatabase(database)
            elif mod == 'bplus':
                tablas = bplus.showTables(database)
                res = cambioTablas(bplus, tablas, database, mode, db)
                if not res:
                    bplus.dropDatabase(database)
            elif mod == 'dict':
                tablas = dict.showTables(database)
                res = cambioTablas(dict, tablas, database, mode, db)
                if not res:
                    dict.dropDatabase(database)
            elif mod == 'isam':
                tablas = isam.showTables(database)
                res = cambioTablas(isam, tablas, database, mode, db)
                if not res:
                    isam.dropDatabase(database)
            elif mod == 'json':
                tablas = json.showTables(database)
                res = cambioTablas(json, tablas, database, mode, db)
                if not res:
                    json.dropDatabase(database)
            elif mod == 'hash':
                tablas = hash.showTables(database)
                res = cambioTablas(hash, tablas, database, mode, db)
                if not res:
                    hash.dropDatabase(database)
            # Persist the updated mode list in the catalog entry
            # (cambioTablas mutates db[1] in place).
            data[database.upper()] = db
            Serializable.update('./Data', 'Data', data)
            return res
        else:
            return 2
    except:
        return 1
def cambioTablas(modo, tablas, database, mode, db):
    """Copy every table of *database* from engine *modo* into engine
    *mode*, round-tripping the rows through a temporary CSV file.

    ``db`` is the mutable catalog entry; its mode list (db[1]) is
    rewritten so *mode* becomes the primary mode. Always returns 0.
    """
    checkData()
    # NOTE(review): this tests membership in the whole catalog entry, not
    # in the mode list db[1] -- likely intended to be `mode in db[1]`;
    # as written the first branch is effectively never taken. Confirm.
    if mode in db:
        db[1].pop(0)
        db[1].remove(mode)
        db[1].insert(0,mode)
    else:
        db[1].pop(0)
        db[1].insert(0,mode)
    # Create the database in the destination engine and keep its module.
    if mode == 'avl':
        avl.createDatabase(database)
        mod = avl
    elif mode == 'b':
        b.createDatabase(database)
        mod = b
    elif mode == 'bplus':
        bplus.createDatabase(database)
        mod = bplus
    elif mode == 'dict':
        dict.createDatabase(database)
        mod = dict
    elif mode == 'isam':
        isam.createDatabase(database)
        mod = isam
    elif mode == 'json':
        json.createDatabase(database)
        mod = json
    elif mode == 'hash':
        hash.createDatabase(database)
        mod = hash
    import csv
    dataTable = Serializable.Read('./Data/',"DataTables")
    dataTableRef = Serializable.Read('./Data/',"DataTablesRef")
    for x in tablas:
        tab = dataTable.get(database.upper()+"_"+x.upper())
        if tab:
            # Regular table: recreate structure and PK in the new engine.
            tab[1] = mode
            mod.createTable(database, x, tab[2])
            if len(tab[3]):
                mod.alterAddPK(database, x, tab[3])
        else:
            # Internal FK/index helper table; its column count lives in
            # DataTablesRef.
            mod.createTable(database, x, dataTableRef.get(x.upper()+"_"+database.upper()))
        # Dump the source rows to CSV, remembering each value's type so
        # loadCSV can restore them.
        # NOTE(review): the path "./data" differs in case from "./Data";
        # on case-sensitive filesystems these are different folders.
        file = open("./data/change.csv", "w", newline='', encoding='utf-8')
        spamreader = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        tipado = []
        for y in modo.extractTable(database, x):
            tipado_tupla = []
            for t in y:
                tipado_tupla.append(type(t))
            tipado.append(tipado_tupla)
            spamreader.writerow(y)
        file.close()
        mod.loadCSV("./data/change.csv", database, x, tipado)
        os.remove("./data/change.csv")
    Serializable.update('./Data', 'DataTables', dataTable)
    return 0
def alterDatabaseEncoding(database: str, encoding: str) -> int:
    """Change the stored text encoding of *database*.

    Every string already stored must be representable in the new
    encoding, otherwise the change is rejected. Returns 0 on success,
    1 on error, 2 if the database does not exist, 3 for an unsupported
    encoding.
    """
    checkData()
    try:
        if encoding not in ('ascii', 'iso-8859-1', 'utf8'):
            return 3
        data = Serializable.Read('./Data/', "Data")
        db = data.get(database.upper())
        if not db:
            return 2
        database = db[0]
        tables = showTables(database)
        if tables:
            for t in tables:
                rows = extractTable(database, t)
                if rows:
                    for row in rows:
                        for value in row:
                            if type(value) == str:
                                # Raises UnicodeEncodeError (-> code 1) if
                                # any stored string cannot be encoded.
                                value.encode(encoding)
        # BUGFIX: the original used '==' (a no-op comparison) instead of
        # '=', so the catalog encoding was never actually changed.
        db[2] = encoding
        data[database.upper()] = db
        Serializable.update('./Data', 'Data', data)
        return 0
    except Exception:
        return 1
#----------------Table-------------------#
def createTable(database: str, table: str, numberColumns: int) -> int:
    """Create *table* with *numberColumns* columns in the database's
    primary mode, plus three hidden metadata tables (FK, unique index,
    index). Returns 0 ok, 1 error, 2 no such database (3 duplicate table
    is engine-dependent)."""
    checkData()
    try:
        if not validateIdentifier(table):
            return 1
        data = Serializable.Read('./Data/',"Data")
        dataTable = Serializable.Read('./Data/',"DataTables")
        dataTableRef = Serializable.Read('./Data/',"DataTablesRef")
        db = data.get(database.upper())
        if db:
            database = db[0]
            mode =db[1][0]
            # Catalog row: [name, mode, #columns, pk columns, compression level]
            if mode == 'avl':
                res = avl.createTable(database, table, numberColumns)
                dataTable[database.upper()+"_"+table.upper()] = [table, 'avl', numberColumns, [], db[3]]
            elif mode == 'b':
                res = b.createTable(database, table, numberColumns)
                dataTable[database.upper()+"_"+table.upper()] = [table, 'b', numberColumns, [], db[3]]
            elif mode == 'bplus':
                res = bplus.createTable(database, table, numberColumns)
                dataTable[database.upper()+"_"+table.upper()] = [table, 'bplus', numberColumns, [], db[3]]
            elif mode == 'dict':
                res = dict.createTable(database, table, numberColumns)
                dataTable[database.upper()+"_"+table.upper()] = [table, 'dict', numberColumns, [], db[3]]
            elif mode == 'isam':
                res = isam.createTable(database, table, numberColumns)
                dataTable[database.upper()+"_"+table.upper()] = [table, 'isam', numberColumns, [], db[3]]
            elif mode == 'json':
                res = json.createTable(database, table, numberColumns)
                dataTable[database.upper()+"_"+table.upper()] = [table, 'json', numberColumns, [], db[3]]
            elif mode == 'hash':
                res = hash.createTable(database, table, numberColumns)
                dataTable[database.upper()+"_"+table.upper()] = [table, 'hash', numberColumns, [], db[3]]
            if not res:
                # Hidden helper tables that store FK / index definitions;
                # DataTablesRef remembers their column counts.
                createRefTAbles(database, 'TABLE_REF_FK_'+table, 6, mode)
                createRefTAbles(database, 'TABLE_REF_INDEXU_'+table, 4, mode)
                createRefTAbles(database, 'TABLE_REF_INDEX_'+table, 4, mode)
                dataTableRef['TABLE_REF_FK_'+table.upper()+"_"+database.upper()] = 6
                dataTableRef['TABLE_REF_INDEXU_'+table.upper()+"_"+database.upper()] = 4
                dataTableRef['TABLE_REF_INDEX_'+table.upper()+"_"+database.upper()] = 4
                Serializable.update('./Data', 'DataTables', dataTable)
                Serializable.update('./Data', 'DataTablesRef', dataTableRef)
            return res
        else:
            return 2
    except:
        return 1
def showTables(database: str) -> list:
    """Return the user-visible tables of *database* across every engine
    it uses, or None on error / unknown database.

    Hidden metadata tables are filtered out because only user tables
    have an entry in the DataTables catalog.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            database = db[0]
            res = []
            if 'avl' in db[1]:
                res = res + avl.showTables(database)
            if 'b' in db[1]:
                res = res + b.showTables(database)
            if 'bplus' in db[1]:
                res = res + bplus.showTables(database)
            if 'dict' in db[1]:
                res = res + dict.showTables(database)
            if 'isam' in db[1]:
                res = res + isam.showTables(database)
            if 'json' in db[1]:
                res = res + json.showTables(database)
            if 'hash' in db[1]:
                res = res + hash.showTables(database)
            if res:
                # Keep only tables registered in the catalog (drops the
                # hidden TABLE_REF_* helpers).
                ret = []
                dataTable = Serializable.Read('./Data/',"DataTables")
                for x in res:
                    tab = dataTable.get(database.upper()+"_"+x.upper())
                    if tab:
                        ret.append(x)
                return ret
            # No tables found: falls through and returns None implicitly.
        else:
            return None
    except:
        return None
def extractTable(database: str, table: str) -> list:
    """Return all rows of *table* (list of lists), or None on error.

    When the table's compression level (tab[4]) is not -2, string cells
    are stored as hex-encoded zlib data and are decompressed here.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            database = db[0]
            if tab:
                table = tab[0]
                # Dispatch to the engine the table was created with.
                if tab[1] == 'avl':
                    res = avl.extractTable(database, table)
                elif tab[1] == 'b':
                    res = b.extractTable(database, table)
                elif tab[1] == 'bplus':
                    res = bplus.extractTable(database, table)
                elif tab[1] == 'dict':
                    res = dict.extractTable(database, table)
                elif tab[1] == 'isam':
                    res = isam.extractTable(database, table)
                elif tab[1] == 'json':
                    res = json.extractTable(database, table)
                elif tab[1] == 'hash':
                    res = hash.extractTable(database, table)
                ret = []
                if len(res):
                    if tab[4]!=-2:
                        # Table is compressed: decode every string cell.
                        import zlib
                        for tupla in res:
                            rr=[]
                            for x in tupla:
                                if type(x) == str:
                                    rr.append(zlib.decompress(bytes.fromhex(x)).decode())
                                else:
                                    rr.append(x)
                            ret.append(rr)
                if len(ret):
                    return ret
                else:
                    return res
            return None
    except:
        return None
def extractRangeTable(database: str, table: str, columnNumber: int,
                      lower: any, upper: any) -> list:
    """Return the rows whose column *columnNumber* falls in
    [lower, upper]; decompresses string cells when the table is stored
    compressed (tab[4] != -2).

    NOTE(review): on failure this returns int codes (1 error, 2 no db,
    3 no table) instead of a list -- callers must check the type.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                res = None
                if tab[1] == 'avl':
                    res = avl.extractRangeTable(database, table, columnNumber, lower, upper)
                elif tab[1] == 'b':
                    res = b.extractRangeTable(database, table, columnNumber, lower, upper)
                elif tab[1] == 'bplus':
                    res = bplus.extractRangeTable(database, table, columnNumber, lower, upper)
                elif tab[1] == 'dict':
                    res = dict.extractRangeTable(database, table, columnNumber, lower, upper)
                elif tab[1] == 'isam':
                    res = isam.extractRangeTable(database, table, columnNumber, lower, upper)
                elif tab[1] == 'json':
                    # The json engine ranges over the PK and takes no
                    # column number.
                    res = json.extractRangeTable(database, table, lower, upper)
                elif tab[1] == 'hash':
                    res = hash.extractRangeTable(database, table, columnNumber, lower, upper)
                ret = []
                if len(res):
                    if tab[4]!=-2:
                        # Compressed table: decode hex+zlib string cells.
                        import zlib
                        for tupla in res:
                            rr=[]
                            for x in tupla:
                                if type(x) == str:
                                    rr.append(zlib.decompress(bytes.fromhex(x)).decode())
                                else:
                                    rr.append(x)
                            ret.append(rr)
                if len(ret):
                    return ret
                else:
                    return res
            return 3
        else:
            return 2
    except:
        return 1
def alterAddPK(database: str, table: str, columns: list) -> int:
    """Declare *columns* (0-based indices) as the primary key of *table*.

    Returns 0 ok, 1 error, 2 no such database, 3 no such table (4 "PK
    already exists" is engine-dependent).
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        db = data.get(database.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                if tab[1] == 'avl':
                    res = avl.alterAddPK(database, table, columns)
                elif tab[1] == 'b':
                    res = b.alterAddPK(database, table, columns)
                elif tab[1] == 'bplus':
                    res = bplus.alterAddPK(database, table, columns)
                elif tab[1] == 'dict':
                    res = dict.alterAddPK(database, table, columns)
                elif tab[1] == 'isam':
                    res = isam.alterAddPK(database, table, columns)
                elif tab[1] == 'json':
                    res = json.alterAddPK(database, table, columns)
                elif tab[1] == 'hash':
                    res = hash.alterAddPK(database, table, columns)
                if not res:
                    # Remember the PK columns in the table catalog.
                    tab[3] = columns
                    dataTable[database.upper()+"_"+table.upper()] = tab
                    Serializable.update('./Data', 'DataTables', dataTable)
                return res
            else:
                return 3
        else:
            return 2
    except:
        return 1
def alterDropPK(database: str, table: str) -> int:
    """Remove the primary key definition of *table*.

    Returns 0 ok, 1 error, 2 no such database, 3 no such table (4 "no
    PK" is engine-dependent).
    """
    checkData()
    try:
        data = Serializable.Read('./Data/', "Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/', "DataTables")
        tab = dataTable.get(database.upper() + "_" + table.upper())
        if db:
            if tab:
                database = db[0]
                # BUGFIX: the original set `table = db[0]` (the database
                # name); the table's real name is tab[0].
                table = tab[0]
                engines = {'avl': avl, 'b': b, 'bplus': bplus, 'dict': dict,
                           'isam': isam, 'json': json, 'hash': hash}
                res = engines[tab[1]].alterDropPK(database, table)
                if not res:
                    # Forget the recorded PK columns in the catalog.
                    tab[3] = []
                    Serializable.update('./Data', 'DataTables', dataTable)
                return res
            else:
                return 3
        else:
            return 2
    except Exception:
        return 1
def alterTableAddFK(database: str, table: str, indexName: str,
                    columns: list, tableRef: str, columnsRef: list) -> int:
    """Register a foreign key *indexName* linking *columns* of *table*
    to *columnsRef* of *tableRef* in the hidden TABLE_REF_FK_ helper.

    Returns 0 ok, 1 invalid columns/error, 2 no such database, 3 a
    table is missing, 4 column-count mismatch.
    """
    try:
        if len(columnsRef)!=len(columns):
            return 4
        for x in columns:
            if type(x)!=int:
                return 1
        for x in columnsRef:
            if type(x)!=int:
                return 1
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            database = db[0]
            dataTable = Serializable.Read('./Data/',"DataTables")
            tab = dataTable.get(database.upper()+"_"+table.upper())
            tabref = dataTable.get(database.upper()+"_"+tableRef.upper())
            if tab and tabref:
                table = tab[0]
                tableRef = tabref[0]
                # NOTE(review): `and` binds tighter than `or`, so the
                # upper-bound checks only apply together with
                # `min(columns) < 0` -- probably meant four or-ed tests.
                if min(columnsRef) < 0 or min(columns) < 0 and max(columnsRef) >= tabref[2] and max(columns)>= tab[2]:
                    return 1
                mode =db[1][0]
                # FK row: [name, db, table, cols, ref table, ref cols].
                register = [indexName, database, table, columns, tableRef, columnsRef]
                res = registerRefTAbles(database, 'TABLE_REF_FK_'+table, register, mode)
                return res
            return 3
        else:
            return 2
    except:
        return 1
def createRefTAbles(database, tableref, numberColumns, mode):
    """Create the hidden metadata table *tableref* in engine *mode* and
    declare its first column (the index/constraint name) as primary key.

    Returns the engine's alterAddPK result.
    """
    engines = {'avl': avl, 'b': b, 'bplus': bplus, 'dict': dict,
               'isam': isam, 'json': json, 'hash': hash}
    engine = engines[mode]
    res = engine.createTable(database, tableref, numberColumns)
    res = engine.alterAddPK(database, tableref, [0])
    return res
def buscarcreateRefTables(database, tableref, mode, index):
    """Look up the row keyed by *index* in the hidden metadata table
    *tableref*, using the engine named by *mode*."""
    engines = {'avl': avl, 'b': b, 'bplus': bplus, 'dict': dict,
               'isam': isam, 'json': json, 'hash': hash}
    return engines[mode].extractRow(database, tableref, [index])
def registerRefTAbles(database, tableref, register, mode):
    """Insert *register* into the hidden metadata table *tableref* using
    the engine named by *mode*.

    Any non-zero engine status is collapsed to the generic error code 1.
    """
    engines = {'avl': avl, 'b': b, 'bplus': bplus, 'dict': dict,
               'isam': isam, 'json': json, 'hash': hash}
    res = engines[mode].insert(database, tableref, register)
    return 1 if res else res
def dropRefTAbles(database, tableref, mode, index):
    """Delete the row keyed by *index* from the hidden metadata table
    *tableref*, using the engine named by *mode*.

    Any non-zero engine status is collapsed to the generic error code 1.
    """
    engines = {'avl': avl, 'b': b, 'bplus': bplus, 'dict': dict,
               'isam': isam, 'json': json, 'hash': hash}
    res = engines[mode].delete(database, tableref, [index])
    return 1 if res else res
def alterTableDropFK(database: str, table: str, indexName: str) -> int:
    """Remove the foreign key *indexName* from the hidden
    TABLE_REF_FK_ helper table of *table*.

    Returns 0 ok, 1 error, 2 no such database, 3 no such table,
    4 unknown FK name.
    """
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            dataTable = Serializable.Read('./Data/',"DataTablesRef")
            dataTables = Serializable.Read('./Data/',"DataTables")
            tb = dataTables.get(database.upper()+"_"+table.upper())
            tab = dataTable.get('TABLE_REF_FK_'+table.upper()+"_"+database.upper())
            if tab and tb:
                database = db[0]
                table = tb[0]
                # Ensure the FK actually exists before deleting it.
                if not buscarcreateRefTables(database, 'TABLE_REF_FK_'+table, db[1][0], indexName):
                    return 4
                res = dropRefTAbles(database, 'TABLE_REF_FK_'+table, db[1][0], indexName)
                return res
            return 3
        else:
            return 2
    except:
        return 1
def alterTableAddUnique(database: str, table: str, indexName: str, columns: list) -> int:
    """Register a unique index *indexName* over *columns* of *table* in
    the hidden TABLE_REF_INDEXU_ helper.

    Returns 0 ok, 1 invalid columns/error, 2 no such database, 3 no
    such table, 4 column index out of range.
    """
    try:
        for x in columns:
            if type(x)!=int:
                return 1
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            dataTable = Serializable.Read('./Data/',"DataTables")
            tab = dataTable.get(database.upper()+"_"+table.upper())
            if tab:
                # tab[2] is the table's column count.
                if min(columns) < 0 or max(columns)>= tab[2]:
                    return 4
                database = db[0]
                table = tab[0]
                mode =db[1][0]
                register = [indexName, database, table, columns]
                res = registerRefTAbles(database, 'TABLE_REF_INDEXU_'+table, register, mode)
                return res
            return 3
        else:
            return 2
    except:
        return 1
def alterTableDropUnique(database: str, table: str, indexName: str) -> int:
    """Remove the unique index *indexName* from the hidden
    TABLE_REF_INDEXU_ helper table of *table*.

    Returns 0 ok, 1 error, 2 no such database, 3 no such table,
    4 unknown index name.
    """
    try:
        data = Serializable.Read('./Data/', "Data")
        db = data.get(database.upper())
        if db:
            dataTableRef = Serializable.Read('./Data/', "DataTablesRef")
            dataTables = Serializable.Read('./Data/', "DataTables")
            # BUGFIX: the original looked up the FK helper key
            # ('TABLE_REF_FK_') here; unique indexes live under
            # 'TABLE_REF_INDEXU_'.
            tab = dataTableRef.get('TABLE_REF_INDEXU_' + table.upper() + "_" + database.upper())
            tb = dataTables.get(database.upper() + "_" + table.upper())
            if tab and tb:
                database = db[0]
                table = tb[0]
                # Ensure the index actually exists before deleting it.
                if not buscarcreateRefTables(database, 'TABLE_REF_INDEXU_' + table, db[1][0], indexName):
                    return 4
                return dropRefTAbles(database, 'TABLE_REF_INDEXU_' + table, db[1][0], indexName)
            return 3
        else:
            return 2
    except Exception:
        return 1
def alterTableAddIndex(database: str, table: str, indexName: str, columns: list) -> int:
    """Register an index *indexName* over *columns* of *table* in the
    hidden TABLE_REF_INDEX_ helper.

    Returns 0 ok, 1 invalid columns/error, 2 no such database, 3 no
    such table, 4 column index out of range.
    """
    try:
        for x in columns:
            if type(x)!=int:
                return 1
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            dataTable = Serializable.Read('./Data/',"DataTables")
            tab = dataTable.get(database.upper()+"_"+table.upper())
            if tab:
                # tab[2] is the table's column count.
                if min(columns) < 0 or max(columns)>= tab[2]:
                    return 4
                database = db[0]
                table = tab[0]
                mode =db[1][0]
                register = [indexName, database, table, columns]
                res = registerRefTAbles(database, 'TABLE_REF_INDEX_'+table, register, mode)
                return res
            return 3
        else:
            return 2
    except:
        return 1
def alterTableDropIndex(database: str, table: str, indexName: str) -> int:
    """Remove the index *indexName* from the hidden TABLE_REF_INDEX_
    helper table of *table*.

    Returns 0 ok, 1 error, 2 no such database, 3 no such table,
    4 unknown index name.
    """
    try:
        data = Serializable.Read('./Data/', "Data")
        db = data.get(database.upper())
        if db:
            dataTableRef = Serializable.Read('./Data/', "DataTablesRef")
            dataTables = Serializable.Read('./Data/', "DataTables")
            # BUGFIX: the original looked up the FK helper key
            # ('TABLE_REF_FK_') here; plain indexes live under
            # 'TABLE_REF_INDEX_'.
            tab = dataTableRef.get('TABLE_REF_INDEX_' + table.upper() + "_" + database.upper())
            tb = dataTables.get(database.upper() + "_" + table.upper())
            if tab and tb:
                database = db[0]
                table = tb[0]
                # Ensure the index actually exists before deleting it.
                if not buscarcreateRefTables(database, 'TABLE_REF_INDEX_' + table, db[1][0], indexName):
                    return 4
                return dropRefTAbles(database, 'TABLE_REF_INDEX_' + table, db[1][0], indexName)
            return 3
        else:
            return 2
    except Exception:
        return 1
def alterTable(database: str, tableOld: str, tableNew: str) -> int:
    """Rename table *tableOld* to *tableNew* inside *database*.

    Returns 0 ok, 1 invalid name/error, 2 no such database, 3 no such
    table (4 "new name taken" is engine-dependent).
    """
    checkData()
    try:
        if not validateIdentifier(tableNew):
            return 1
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+tableOld.upper())
        if db:
            if tab:
                database = db[0]
                tableOld = tab[0]
                if tab[1] == 'avl':
                    res = avl.alterTable(database, tableOld, tableNew)
                elif tab[1] == 'b':
                    res = b.alterTable(database, tableOld, tableNew)
                elif tab[1] == 'bplus':
                    res = bplus.alterTable(database, tableOld, tableNew)
                elif tab[1] == 'dict':
                    res = dict.alterTable(database, tableOld, tableNew)
                elif tab[1] == 'isam':
                    res = isam.alterTable(database, tableOld, tableNew)
                elif tab[1] == 'json':
                    res = json.alterTable(database, tableOld, tableNew)
                elif tab[1] == 'hash':
                    res = hash.alterTable(database, tableOld, tableNew)
                if not res:
                    # Re-key the catalog entry and rename the companion
                    # security (blockchain) file if one exists.
                    tab[0]=tableNew
                    dataTable[database.upper()+"_"+tableNew.upper()] = tab
                    del dataTable[database.upper()+"_"+tableOld.upper()]
                    if os.path.isfile("./Data/security/"+database+"_"+tableOld+".json"):
                        os.rename("./Data/security/"+database+"_"+tableOld+".json","./Data/security/"+database+"_"+tableNew+".json")
                    Serializable.update('./Data', 'DataTables', dataTable)
                return res
            else:
                return 3
        else:
            return 2
    except:
        return 1
def alterAddColumn(database: str, table: str, default: any) -> int:
    """Append a column with value `default` to every row of a table.

    If the table is compressed (tab[4] != -2) a string default is
    zlib-compressed before being stored, matching the rest of the data.
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                # Validate the default against the database encoding (db[2]);
                # raises (-> return 1) if it cannot be encoded.
                if type(default) == str:
                    default.encode(db[2], 'strict')
                if tab[4]!=-2:
                    if type(default) == str:
                        import zlib
                        default = zlib.compress(default.encode(), tab[4]).hex()
                # Snapshot rows before the change for the safe-mode diff below.
                rows1 = extractTable(database, table)
                if tab[1] == 'avl':
                    res = avl.alterAddColumn(database, table, default)
                elif tab[1] == 'b':
                    res = b.alterAddColumn(database, table, default)
                elif tab[1] == 'bplus':
                    res = bplus.alterAddColumn(database, table, default)
                elif tab[1] == 'dict':
                    res = dict.alterAddColumn(database, table, default)
                elif tab[1] == 'isam':
                    res = isam.alterAddColumn(database, table, default)
                elif tab[1] == 'json':
                    res = json.alterAddColumn(database, table, default)
                elif tab[1] == 'hash':
                    res = hash.alterAddColumn(database, table, default)
                if not res:
                    # tab[2] tracks the column count.
                    tab[2]+=1
                    rows2 = extractTable(database, table)
                    dataTable[database.upper()+"_"+table.upper()] = tab
                    Serializable.update('./Data', 'DataTables', dataTable)
                    # Safe mode on: record old-row/new-row pairs in the blockchain.
                    if os.path.isfile('./Data/security/'+database+"_"+table+".json"):
                        for row in rows1:
                            index = rows1.index(row)
                            row2 = rows2[index]
                            block.blockchain().dropAddColumn(row, row2, database, table)
                return res
            else:
                return 3
        else:
            return 2
    except:
        return 1
def alterDropColumn(database: str, table: str, columnNumber: int) -> int:
    """Delete column `columnNumber` from every row of a table.

    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                # Snapshot rows before the change for the safe-mode diff below.
                rows1 = extractTable(database, table)
                # Dispatch to the storage engine that owns this table (tab[1]).
                if tab[1] == 'avl':
                    res = avl.alterDropColumn(database, table, columnNumber)
                elif tab[1] == 'b':
                    res = b.alterDropColumn(database, table, columnNumber)
                elif tab[1] == 'bplus':
                    res = bplus.alterDropColumn(database, table, columnNumber)
                elif tab[1] == 'dict':
                    res = dict.alterDropColumn(database, table, columnNumber)
                elif tab[1] == 'isam':
                    res = isam.alterDropColumn(database, table, columnNumber)
                elif tab[1] == 'json':
                    res = json.alterDropColumn(database, table, columnNumber)
                elif tab[1] == 'hash':
                    res = hash.alterDropColumn(database, table, columnNumber)
                if not res:
                    rows2 = extractTable(database, table)
                    # tab[2] tracks the column count.
                    tab[2]-=1
                    dataTable[database.upper()+"_"+table.upper()] = tab
                    Serializable.update('./Data', 'DataTables', dataTable)
                    # Safe mode on: record old-row/new-row pairs in the blockchain.
                    if os.path.isfile('./Data/security/'+database+"_"+table+".json"):
                        for row in rows1:
                            index = rows1.index(row)
                            row2 = rows2[index]
                            block.blockchain().dropAddColumn(row, row2, database, table)
                return res
            else:
                return 3
        else:
            return 2
    except:
        return 1
def dropTable(database: str, table: str) -> int:
    """Drop a table from its storage engine and remove its catalog entry.

    If the table was the last one held by a secondary engine (not the
    database's primary mode, db[1][0]), that engine's copy of the database
    is dropped and the mode removed from db[1].
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                mod = None
                # Dispatch to the owning engine; remember the module for cleanup.
                if tab[1] == 'avl':
                    res = avl.dropTable(database, table)
                    mod = avl
                elif tab[1] == 'b':
                    res = b.dropTable(database, table)
                    mod = b
                elif tab[1] == 'bplus':
                    res = bplus.dropTable(database, table)
                    mod = bplus
                elif tab[1] == 'dict':
                    res = dict.dropTable(database, table)
                    mod = dict
                elif tab[1] == 'isam':
                    res = isam.dropTable(database, table)
                    mod = isam
                elif tab[1] == 'json':
                    res = json.dropTable(database, table)
                    mod = json
                elif tab[1] == 'hash':
                    res = hash.dropTable(database, table)
                    mod = hash
                if not res:
                    # Garbage-collect an empty secondary engine for this database.
                    if not len(mod.showTables(database)) and db[1][0]!=tab[1]:
                        mod.dropDatabase(database)
                        db[1].remove(tab[1])
                        data[database.upper()] = db
                    del dataTable[database.upper()+"_"+table.upper()]
                    Serializable.update('./Data', 'Data', data)
                    Serializable.update('./Data', 'DataTables', dataTable)
                    return res
                else:
                    return 3
            else:
                return 2
    except:
        return 1
def alterTableMode(database: str, table: str, mode: str) -> int:
    """Migrate a table to a different storage engine (`mode`).

    The migration extracts all tuples from the current engine, drops the
    table there, round-trips the data through a temporary CSV (keeping the
    original Python type of each value in `tipado`), and re-creates the
    table -- including its primary key (tab[3]) -- in the target engine.
    Return codes: 0 success, 1 internal error or same mode, 2 database not
    found, 3 table not found / extraction failed, 4 unknown mode.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if mode not in ['avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash']:
            return 4
        if db:
            if not tab:
                return 3
            database = db[0]
            table = tab[0]
            if mode == tab[1]:
                return 1
            tuplas = None
            # Register the target mode on the database if it is new.
            if mode not in db[1]:
                db[1].append(mode)
            # Pull all rows out of the current engine, then drop it there.
            if tab[1] == 'avl':
                tuplas = avl.extractTable(database, table)
                modo = avl
                avl.dropTable(database, table)
            elif tab[1] == 'b':
                tuplas = b.extractTable(database, table)
                b.dropTable(database, table)
                modo = b
            elif tab[1] == 'bplus':
                tuplas = bplus.extractTable(database, table)
                bplus.dropTable(database, table)
                modo = bplus
            elif tab[1] == 'dict':
                tuplas = dict.extractTable(database, table)
                dict.dropTable(database, table)
                modo = dict
            elif tab[1] == 'isam':
                tuplas = isam.extractTable(database, table)
                isam.dropTable(database, table)
                modo = isam
            elif tab[1] == 'json':
                tuplas = json.extractTable(database, table)
                json.dropTable(database, table)
                modo = json
            elif tab[1] == 'hash':
                tuplas = hash.extractTable(database, table)
                hash.dropTable(database, table)
                modo = hash
            # Garbage-collect the old engine if it is now empty and is not
            # the database's primary mode (db[1][0]).
            if tab[1] != db[1][0] and tuplas!=None:
                if not len(modo.showTables(database)):
                    modo.dropDatabase(database)
                    db[1].remove(tab[1])
            if tuplas!=None:
                # Resolve the target engine module.
                if mode == 'avl':
                    mod = avl
                elif mode == 'b':
                    mod = b
                elif mode == 'bplus':
                    mod = bplus
                elif mode == 'isam':
                    mod = isam
                elif mode == 'dict':
                    mod = dict
                elif mode == 'json':
                    mod = json
                elif mode == 'hash':
                    mod = hash
                import csv
                tipado = []
                file = open("./data/change.csv", "w", newline='', encoding='utf-8-sig')
                spamreader = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                if mod.showTables(database) == None:
                    mod.createDatabase(database)
                mod.createTable(database, table, tab[2])
                for y in tuplas:
                    # Remember each value's type so loadCSV can restore it.
                    tipado_tupla = []
                    for t in y:
                        tipado_tupla.append(type(t))
                    tipado.append(tipado_tupla)
                    spamreader.writerow(y)
                file.close()
                # Restore the primary key before reloading the data.
                if len(tab[3]):
                    mod.alterAddPK(database, table, tab[3])
                mod.loadCSV("./data/change.csv", database, table, tipado)
                os.remove("./data/change.csv")
                data[database.upper()] = db
                tab[1] = mode
                dataTable[database.upper()+"_"+table.upper()] = tab
                Serializable.update('./Data', 'Data', data)
                Serializable.update('./Data', 'DataTables', dataTable)
                return 0
            else:
                return 3
        else:
            return 2
    except:
        return 1
def safeModeOn(database: str, table: str)->int:
    """Enable blockchain-backed safe mode for a table.

    Creates the per-table security ledger via block.blockchain().crear().
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found, 4 safe mode already enabled.
    """
    checkData()
    try:
        catalog = Serializable.Read('./Data/',"Data")
        db_entry = catalog.get(database.upper())
        if not db_entry:
            return 2
        tables_catalog = Serializable.Read('./Data/',"DataTables")
        tbl_entry = tables_catalog.get(database.upper()+"_"+table.upper())
        if not tbl_entry:
            return 3
        # Work with the stored (original-case) names.
        real_db = db_entry[0]
        real_tbl = tbl_entry[0]
        # Already in safe mode if the ledger file exists.
        if os.path.isfile("./Data/security/"+real_db+"_"+real_tbl+".json"):
            return 4
        block.blockchain().crear(real_db, real_tbl)
        return 0
    except:
        return 1
def safeModeOff(database: str, table: str)->int:
    """Disable blockchain-backed safe mode for a table.

    Deletes the per-table security ledger file.
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found, 4 safe mode was not enabled.
    """
    checkData()
    try:
        catalog = Serializable.Read('./Data/',"Data")
        db_entry = catalog.get(database.upper())
        if not db_entry:
            return 2
        tables_catalog = Serializable.Read('./Data/',"DataTables")
        tbl_entry = tables_catalog.get(database.upper()+"_"+table.upper())
        if not tbl_entry:
            return 3
        # Work with the stored (original-case) names.
        real_db = db_entry[0]
        real_tbl = tbl_entry[0]
        ledger = "./Data/security/"+real_db+"_"+real_tbl+".json"
        if not os.path.isfile(ledger):
            return 4
        os.remove(ledger)
        return 0
    except:
        return 1
#----------------Tupla-------------------#
def insert(database: str, table: str, register: list) -> int:
    """Insert one row into a table, compressing strings if required.

    String values are validated against the database encoding (db[2]); if
    the table is compressed (tab[4] != -2) they are zlib-compressed in
    place before being handed to the engine.  With safe mode on, the
    original (uncompressed) row is appended to the blockchain ledger.
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                # Keep an uncompressed copy for the blockchain ledger.
                register2 = register[:]
                for x in register:
                    if type(x)==str:
                        # Encoding validation; raises (-> return 1) on failure.
                        x.encode(db[2], "strict")
                        if tab[4] != -2:
                            import zlib
                            index = register.index(x)
                            register[index] = zlib.compress(x.encode(), tab[4]).hex()
                if tab[1] == 'avl' :
                    res = avl.insert(database, table, register)
                elif tab[1] == 'b':
                    res = b.insert(database, table, register)
                elif tab[1] == 'bplus':
                    res = bplus.insert(database, table, register)
                elif tab[1] == 'dict':
                    res = dict.insert(database, table, register)
                elif tab[1] == 'isam':
                    res = isam.insert(database, table, register)
                elif tab[1] == 'json':
                    res = json.insert(database, table, register)
                elif tab[1] == 'hash':
                    res = hash.insert(database, table, register)
                if not res:
                    if os.path.isfile("./Data/security/"+database+"_"+table+".json"):
                        block.blockchain().insert(register2, database, table)
                return res
            return 3
        else:
            return 2
    except:
        return 1
def loadCSV(file: str, database: str, table: str) -> list:
    """Bulk-load rows from a CSV file into a table.

    Rows are first copied to a temporary CSV (./data/change.csv), with
    string values validated against the database encoding and, when the
    table is compressed (tab[4] != -2), zlib-compressed.  Returns the list
    of per-row insert result codes from the engine, or [] on any failure.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                res = []
                tabla = []
                import csv
                ff = open("./data/change.csv", "w", newline='', encoding='utf-8-sig')
                spamreader = csv.writer(ff, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                with open(file, 'r', encoding='utf-8-sig') as fil:
                    reader = csv.reader(fil, delimiter=',')
                    for y in reader:
                        for g in y:
                            if type(g) == str:
                                # Encoding validation; raises (-> return []) on failure.
                                g.encode(db[2], errors='strict')
                                if tab[4] != -2:
                                    import zlib
                                    index = y.index(g)
                                    y[index] = zlib.compress(g.encode(), tab[4]).hex()
                        spamreader.writerow(y)
                        tabla.append(y)
                    fil.close()
                ff.close()
                # Load the (possibly compressed) staging copy, not the original file.
                file = "./data/change.csv"
                if tab[1] == 'avl':
                    res = avl.loadCSV(file, database, table, None)
                elif tab[1] == 'b':
                    res = b.loadCSV(file, database, table, None)
                elif tab[1] == 'bplus':
                    res = bplus.loadCSV(file, database, table, None)
                elif tab[1] == 'dict':
                    res = dict.loadCSV(file, database, table, None)
                elif tab[1] == 'isam':
                    res = isam.loadCSV(file, database, table, None)
                elif tab[1] == 'json':
                    res = json.loadCSV(file, database, table, None)
                elif tab[1] == 'hash':
                    res = hash.loadCSV(file, database, table, None)
                if len(tabla):
                    # Safe mode on: ledger every row whose insert succeeded (code 0).
                    if os.path.isfile("./Data/security/"+database+"_"+table+".json"):
                        i=0
                        for r in res:
                            if r ==0:
                                block.blockchain().insert(tabla[i], database, table)
                            i+=1
                return res
            return []
        else:
            return []
    except:
        return []
def extractRow(database: str, table: str, columns: list) -> list:
    """Fetch one row by its key values, decompressing strings on the way out.

    `columns` holds the key values; for a compressed table (tab[4] != -2)
    string keys are compressed before the lookup (the engine stores
    compressed values) and string results are decompressed afterwards.
    Returns the row as a list, or None on any failure.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                if tab[4] != -2:
                    import zlib
                    # Compress string key values in place so they match stored data.
                    for x in columns:
                        if type(x) == str:
                            index = columns.index(x)
                            columns[index] = zlib.compress(x.encode(), tab[4]).hex()
                if tab[1] == 'avl':
                    res = avl.extractRow(database, table, columns)
                elif tab[1] == 'b':
                    res = b.extractRow(database, table, columns)
                elif tab[1] == 'bplus':
                    res = bplus.extractRow(database, table, columns)
                elif tab[1] == 'dict':
                    res = dict.extractRow(database, table, columns)
                elif tab[1] == 'isam':
                    res = isam.extractRow(database, table, columns)
                elif tab[1] == 'json':
                    res = json.extractRow(database, table, columns)
                elif tab[1] == 'hash':
                    res = hash.extractRow(database, table, columns)
                ret = []
                # Decompress string fields for the caller.
                if len(res) and tab[4]!=-2:
                    import zlib
                    for x in res:
                        if type(x) == str:
                            ret.append(zlib.decompress(bytes.fromhex(x)).decode())
                        else:
                            ret.append(x)
                if len(ret):
                    return ret
                else:
                    return res
            return None
        else:
            return None
    except:
        return None
def update(database: str, table: str, register: dict, columns: list) -> int:
    """Update the row identified by `columns` with the values in `register`.

    `register` maps column index -> new value.  String values are validated
    against the database encoding (db[2]) and, for a compressed table
    (tab[4] != -2), zlib-compressed in place before being handed to the
    engine.  With safe mode on, the ledger is updated with the old and new
    (uncompressed) row images.
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                # Keep an uncompressed copy of the new values for the ledger.
                register2 = {}
                register2.update(register)
                # Snapshot the row before the update for the ledger diff.
                row = extractRow(database, table, columns)
                for x in list(register.values()):
                    if type(x)==str:
                        # Encoding validation; raises (-> return 1) on failure.
                        x.encode(db[2], "strict")
                if tab[4] != -2:
                    import zlib
                    for x in register.keys():
                        if type(register[x]) == str:
                            register[x] = zlib.compress(register[x].encode(), tab[4]).hex()
                if tab[1] == 'avl':
                    res = avl.update(database, table, register, columns)
                elif tab[1] == 'b':
                    res = b.update(database, table, register, columns)
                elif tab[1] == 'bplus':
                    res = bplus.update(database, table, register, columns)
                elif tab[1] == 'dict':
                    res = dict.update(database, table, register, columns)
                elif tab[1] == 'isam':
                    res = isam.update(database, table, register, columns)
                elif tab[1] == 'json':
                    res = json.update(database, table, register, columns)
                elif tab[1] == 'hash':
                    res = hash.update(database, table, register, columns)
                if not res:
                    if os.path.isfile('./Data/security/'+database+"_"+table+".json"):
                        # Build the post-update image by applying the new values
                        # onto a copy of the old row.
                        row2 = row[:]
                        for x in list(register2.keys()):
                            row2[x] = register2[x]
                        block.blockchain().CompararHash(row, row2, database, table)
                return res
            return 3
        else:
            return 2
    except:
        return 1
def delete(database: str, table: str, columns: list) -> int:
    """Delete the row identified by the key values in `columns`.

    With safe mode on, the deleted row is removed from the blockchain
    ledger via EliminarHash.
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                # Snapshot the row before deleting, for the ledger update.
                row = extractRow(database, table, columns)
                if tab[1] == 'avl':
                    res = avl.delete(database, table, columns)
                elif tab[1] == 'b':
                    res = b.delete(database, table, columns)
                elif tab[1] == 'bplus':
                    res = bplus.delete(database, table, columns)
                elif tab[1] == 'dict':
                    res = dict.delete(database, table, columns)
                elif tab[1] == 'isam':
                    res = isam.delete(database, table, columns)
                elif tab[1] == 'json':
                    res = json.delete(database, table, columns)
                elif tab[1] == 'hash':
                    res = hash.delete(database, table, columns)
                if not res:
                    if os.path.isfile('./Data/security/'+database+"_"+table+".json"):
                        block.blockchain().EliminarHash(row, database, table)
                return res
            return 3
        else:
            return 2
    except:
        return 1
def truncate(database: str, table: str) -> int:
    """Delete all rows of a table, keeping its definition.

    With safe mode on, the blockchain ledger is reset (crear) to match the
    now-empty table.
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 table not found.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                if tab[1] == 'avl':
                    res = avl.truncate(database, table)
                elif tab[1] == 'b':
                    res = b.truncate(database, table)
                elif tab[1] == 'bplus':
                    res = bplus.truncate(database, table)
                elif tab[1] == 'dict':
                    res = dict.truncate(database, table)
                elif tab[1] == 'isam':
                    res = isam.truncate(database, table)
                elif tab[1] == 'json':
                    res = json.truncate(database, table)
                elif tab[1] == 'hash':
                    res = hash.truncate(database, table)
                if not res:
                    if os.path.isfile('./Data/security/'+database+"_"+table+".json"):
                        block.blockchain().crear(database, table)
                return res
            return 3
        else:
            return 2
    except:
        return 1
#------------Nuevas Funciones-------------#
#--------------Encrypt-------------------#
def encrypt(backup:str, password: str):
    """Encrypt a backup payload with `password` via the crypt helper.

    Returns the encrypted result, or None on any failure.
    """
    checkData()
    try:
        encrypted = crypt.encrypt(backup, password, password)
    except:
        return None
    return encrypted
def decrypt(backup:str, password: str):
    """Decrypt an encrypted backup payload with `password` via the crypt helper.

    Returns the decrypted result, or None on any failure.
    """
    checkData()
    try:
        decrypted = crypt.decrypt(backup, password, password)
    except:
        return None
    return decrypted
#--------------Checksum------------------#
def checksumDatabase(database: str, mode: str) -> str:
    """Compute an MD5 or SHA256 digest over every table file of a database.

    `mode` must be 'MD5' or 'SHA256'.  The digest is fed, table by table,
    with the raw bytes of each engine's on-disk file.  Returns the hex
    digest, or None if the database is unknown, empty, or on any error.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            database = db[0]
            # Collect table names from every engine this database uses (db[1]).
            tables = []
            if 'avl' in db[1]:
                tables += avl.showTables(database)
            if 'b' in db[1]:
                tables += b.showTables(database)
            if 'bplus'in db[1]:
                tables += bplus.showTables(database)
            if 'dict'in db[1]:
                tables += dict.showTables(database)
            if 'isam'in db[1]:
                tables += isam.showTables(database)
            if 'json'in db[1]:
                tables += json.showTables(database)
            if 'hash'in db[1]:
                tables += hash.showTables(database)
            if len(tables):
                dataTable = Serializable.Read('./Data/',"DataTables")
                if mode == 'MD5':
                    hash_md5 = hashlib.md5()
                elif mode == 'SHA256':
                    hash_md5 = hashlib.sha256()
                else:
                    return None
                for x in tables:
                    tab = dataTable.get(database.upper()+"_"+x.upper())
                    # Fall back to the database's primary mode when the table
                    # has no catalog entry.
                    if tab:
                        mod = tab[1]
                    else:
                        mod = db[1][0]
                    # Each engine stores its table file under a different path scheme.
                    if mod == 'avl':
                        hash_md5.update(open('./Data/avlMode/'+database+"_"+x+".tbl",'rb').read())
                    elif mod == 'b':
                        hash_md5.update(open('./Data/B/'+database+"-"+x+"-b.bin",'rb').read())
                    elif mod == 'isam':
                        hash_md5.update(open('./Data/ISAMMode/tables/'+database+x+".bin",'rb').read())
                    elif mod == 'bplus':
                        hash_md5.update(open('./Data/BPlusMode/'+database+"/"+x+"/"+x+".bin",'rb').read())
                    elif mod == 'dict':
                        hash_md5.update(open('./Data/dict/'+database+"/"+x+".bin",'rb').read())
                    elif mod == 'json':
                        hash_md5.update(open('./Data/json/'+database+"-"+x,'rb').read())
                    elif mod == 'hash':
                        hash_md5.update(open('./Data/hash/'+database+"/"+x+".bin",'rb').read())
                return hash_md5.hexdigest()
        return None
    except:
        return None
def checksumTable(database: str, table:str, mode: str) -> str:
    """Compute an MD5 or SHA256 digest of a single table's on-disk file.

    `mode` must be 'MD5' or 'SHA256'.  Returns the hex digest, or None if
    the database/table is unknown, the mode is invalid, or on any error.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        dataTable = Serializable.Read('./Data/',"DataTables")
        tab = dataTable.get(database.upper()+"_"+table.upper())
        if db:
            if tab:
                database = db[0]
                table = tab[0]
                mod=tab[1]
                if mode == 'MD5':
                    hash_md5 = hashlib.md5()
                elif mode == 'SHA256':
                    hash_md5 = hashlib.sha256()
                else:
                    return None
                # Each engine stores its table file under a different path scheme.
                if mod == 'avl':
                    hash_md5.update(open('./Data/avlMode/'+database+"_"+table+".tbl",'rb').read())
                elif mod == 'b':
                    hash_md5.update(open('./Data/B/'+database+"-"+table+"-b.bin",'rb').read())
                elif mod == 'isam':
                    hash_md5.update(open('./Data/ISAMMode/tables/'+database+table+".bin",'rb').read())
                elif mod == 'bplus':
                    hash_md5.update(open('./Data/BPlusMode/'+database+"/"+table+"/"+table+".bin",'rb').read())
                elif mod == 'dict':
                    hash_md5.update(open('./Data/dict/'+database+"/"+table+".bin",'rb').read())
                elif mod == 'json':
                    hash_md5.update(open('./Data/json/'+database+"-"+table,"rb").read())
                elif mod == 'hash':
                    hash_md5.update(open('./Data/hash/'+database+"/"+table+".bin",'rb').read())
                return hash_md5.hexdigest()
        return None
    except:
        return None
#--------------Compress-------------------#
def alterDatabaseCompress(database, level):
    """Compress every table of a database with zlib at `level`.

    For each still-uncompressed table (tab[4] == -2) all tuples are
    extracted, string/bytes values are zlib-compressed to hex, and the
    data is reloaded through a temporary CSV.  Finally tab[4] and db[3]
    are set to `level`.
    Return codes: 0 success, 1 already compressed or internal error,
    2 database not found, 3 level out of range, 4 level not an int.
    """
    checkData()
    data = Serializable.Read('./Data/', "Data")
    db = data.get(database.upper())
    dataTable = Serializable.Read('./Data/', "DataTables")
    if type(level) != int:
        return 4
    elif (level < 0 or level > 9) and level != -1:
        return 3
    if db:
        # db[3] is the database-wide compression level; -2 means uncompressed.
        if db[3] !=-2:
            return 1
        database = db[0]
        try:
            tablas = showTables(database)
            if tablas:
                for table in tablas:
                    tab = dataTable.get(database.upper() + "_" + table.upper())
                    if tab and tab[4] == -2:
                        tuplas = extractTable(database, table)
                        if tuplas != None:
                            import zlib
                            mod = None
                            if tab[1] == 'avl':
                                mod = avl
                            elif tab[1] == 'b':
                                mod = b
                            elif tab[1] == 'bplus':
                                mod = bplus
                            elif tab[1] == 'hash':
                                mod = hash
                            elif tab[1] == 'json':
                                mod = json
                            elif tab[1] == 'dict':
                                mod = dict
                            elif tab[1] == 'isam':
                                mod = isam
                            import csv
                            file = open("./data/change.csv", "w", newline='', encoding='utf-8-sig')
                            spamreader = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                            tipado = []
                            compressed_lista = []
                            # Compress str/bytes cells; other types pass through unchanged.
                            for y in tuplas:
                                compressed_data = []
                                for item in y:
                                    compressed_item = item
                                    if type(item) == bytes or type(item) == bytearray:
                                        compressed_item = zlib.compress(item, level).hex()
                                    elif type(item) == str:
                                        compressed_item = zlib.compress(item.encode(), level).hex()
                                    compressed_data.append(compressed_item)
                                compressed_lista.append(compressed_data)
                            # Record each value's type so loadCSV can restore it.
                            for y in compressed_lista:
                                tipado_tupla = []
                                for t in y:
                                    tipado_tupla.append(type(t))
                                tipado.append(tipado_tupla)
                                spamreader.writerow(y)
                            file.close()
                            truncate(database, table)
                            mod.loadCSV("./data/change.csv", database, table, tipado)
                            os.remove("./data/change.csv")
        except:
            return 1
        if tablas:
            for table in showTables(database):
                tab = dataTable.get(database.upper() + "_" + table.upper())
                if tab:
                    tab[4] = level
                    dataTable[database.upper()+"_"+table.upper()] = tab
            Serializable.update('./Data', 'DataTables', dataTable)
        db[3] = level
        # Fix: write back under the upper-cased key used everywhere else;
        # writing data[database] created a duplicate entry under the
        # original-case name instead of updating the existing one.
        data[database.upper()] = db
        Serializable.update('./Data', 'Data', data)
        return 0
    else:
        return 2
def alterDatabaseDecompress(database):
    """Decompress every table of a database back to plain values.

    Tuples are extracted (extractRow/extractTable decompress on the way
    out), written to a temporary CSV, and reloaded; then tab[4] and db[3]
    are reset to -2 (uncompressed).
    Return codes: 0 success, 1 internal error, 2 database not found,
    3 database is not compressed.
    """
    checkData()
    data = Serializable.Read('./Data/', "Data")
    db = data.get(database.upper())
    dataTable = Serializable.Read('./Data/', "DataTables")
    if db:
        database = db[0]
        try:
            tablas = showTables(database)
            if tablas:
                for table in tablas:
                    tab = dataTable.get(database.upper() + "_" + table.upper())
                    if tab:
                        # db[3] == -2 means the database is not compressed.
                        # NOTE(review): this check sits inside the loop although it
                        # only depends on db -- confirm it was not meant to run once
                        # before the loop.
                        if db[3] == -2:
                            return 3
                        tuplas = extractTable(database, table)
                        if tuplas != None:
                            truncate(database, table)
                            import zlib
                            mod = None
                            if tab[1] == 'avl':
                                mod = avl
                            elif tab[1] == 'b':
                                mod = b
                            elif tab[1] == 'bplus':
                                mod = bplus
                            elif tab[1] == 'hash':
                                mod = hash
                            elif tab[1] == 'json':
                                mod = json
                            elif tab[1] == 'dict':
                                mod = dict
                            elif tab[1] == 'isam':
                                mod = isam
                            import csv
                            file = open("./data/change.csv", "w", newline='', encoding='utf-8-sig')
                            spamreader = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                            tipado = []
                            # Record each value's type so loadCSV can restore it.
                            for y in tuplas:
                                tipado_tupla = []
                                for t in y:
                                    tipado_tupla.append(type(t))
                                tipado.append(tipado_tupla)
                                spamreader.writerow(y)
                            file.close()
                            truncate(database, table)
                            mod.loadCSV("./data/change.csv", database, table, tipado)
                            os.remove("./data/change.csv")
        except:
            return 1
        if tablas:
            for table in showTables(database):
                tab = dataTable.get(database.upper() + "_" + table.upper())
                if tab:
                    tab[4] = -2
                    dataTable[database.upper()+"_"+table.upper()] = tab
            Serializable.update('./Data', 'DataTables', dataTable)
        db[3] = -2
        data[database.upper()] = db
        Serializable.update('./Data', 'Data', data)
        return 0
    else:
        return 2
def alterTableCompress(database, table, level):
    """Compress one table's values with zlib at `level`.

    Extracts all tuples, compresses str/bytes cells to hex, reloads them
    through a temporary CSV, and sets tab[4] = level.
    Return codes: 0 success, 1 already compressed or internal error,
    2 database not found, 3 extraction failed, 4 invalid level.
    NOTE(review): if the table does not exist, `tab` is None and the
    final `tab[4] = level` raises an uncaught TypeError -- confirm a
    missing `return 3` guard was intended here.
    """
    checkData()
    data = Serializable.Read('./Data/', "Data")
    db = data.get(database.upper())
    dataTable = Serializable.Read('./Data/', "DataTables")
    if type(level) != int:
        return 4
    elif (level < 0 or level > 9) and level != -1:
        return 4
    if db:
        tab = dataTable.get(database.upper() + "_" + table.upper())
        try:
            if tab:
                database = db[0]
                table = tab[0]
                # tab[4] != -2 means the table is already compressed.
                if tab[4] != -2:
                    return 1
                tuplas = extractTable(database, table)
                if tuplas != None:
                    import zlib
                    tipado = []
                    if tab[1] == 'avl':
                        mod = avl
                    elif tab[1] == 'b':
                        mod = b
                    elif tab[1] == 'bplus':
                        mod = bplus
                    elif tab[1] == 'hash':
                        mod = hash
                    elif tab[1] == 'json':
                        mod = json
                    elif tab[1] == 'dict':
                        mod = dict
                    elif tab[1] == 'isam':
                        mod = isam
                    import csv
                    file = open("./data/change.csv", "w", newline='', encoding='utf-8-sig')
                    spamreader = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    compress_list = []
                    # Compress str/bytes cells; other types pass through unchanged.
                    for y in tuplas:
                        compressed_data = []
                        for item in y:
                            compressed_item = item
                            if type(item) == bytes or type(item) == bytearray:
                                compressed_item = zlib.compress(item, level).hex()
                            elif type(item) == str:
                                compressed_item = zlib.compress(item.encode(), level).hex()
                            compressed_data.append(compressed_item)
                        compress_list.append(compressed_data)
                    tipado = []
                    # Record each value's type so loadCSV can restore it.
                    for y in compress_list:
                        tipado_tupla = []
                        for t in y:
                            tipado_tupla.append(type(t))
                        tipado.append(tipado_tupla)
                        spamreader.writerow(y)
                    file.close()
                    truncate(database, table)
                    mod.loadCSV("./data/change.csv", database, table, tipado)
                    os.remove("./data/change.csv")
                else:
                    return 3
        except:
            return 1
        tab[4] = level
        dataTable[database.upper()+"_"+table.upper()] = tab
        Serializable.update('./Data', 'DataTables', dataTable)
        return 0
    else:
        return 2
def alterTableDecompress(database, table):
    """Decompress one table back to plain values and set tab[4] = -2.

    Return codes: 0 success, 1 internal error, 2 database not found,
    3 extraction failed, 4 table is not compressed.
    NOTE(review): if the table does not exist, `tab` is None and the
    final `tab[4] = -2` raises an uncaught TypeError -- confirm a missing
    `return 3` guard was intended here.
    """
    checkData()
    data = Serializable.Read('./Data/', "Data")
    db = data.get(database.upper())
    dataTable = Serializable.Read('./Data/', "DataTables")
    if db:
        tab = dataTable.get(database.upper() + "_" + table.upper())
        try:
            if tab:
                # tab[4] == -2 means the table is not compressed.
                if tab[4] == -2:
                    return 4
                database = db[0]
                table = tab[0]
                # extractTable returns decompressed values (see extractRow).
                tuplas = extractTable(database, table)
                if tuplas != None:
                    truncate(database, table)
                    import zlib
                    tipado = []
                    if tab[1] == 'avl':
                        mod = avl
                    elif tab[1] == 'b':
                        mod = b
                    elif tab[1] == 'bplus':
                        mod = bplus
                    elif tab[1] == 'hash':
                        mod = hash
                    elif tab[1] == 'json':
                        mod = json
                    elif tab[1] == 'dict':
                        mod = dict
                    elif tab[1] == 'isam':
                        mod = isam
                    import csv
                    file = open("./data/change.csv", "w", newline='', encoding='utf-8-sig')
                    spamreader = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    tipado = []
                    # Record each value's type so loadCSV can restore it.
                    for y in tuplas:
                        tipado_tupla = []
                        for t in y:
                            tipado_tupla.append(type(t))
                        tipado.append(tipado_tupla)
                        spamreader.writerow(y)
                    file.close()
                    truncate(database, table)
                    mod.loadCSV("./data/change.csv", database, table, tipado)
                    os.remove("./data/change.csv")
                else:
                    return 3
        except:
            return 1
        tab[4] = -2
        dataTable[database.upper()+"_"+table.upper()] = tab
        Serializable.update('./Data', 'DataTables', dataTable)
        return 0
    else:
        return 2
def graphDSD(database: str) -> str:
    """Render the database's table/foreign-key graph with Graphviz.

    Writes ./Data/Grafos/<database>.dot, shells out to `dot` to produce a
    PNG, and returns the absolute (Windows-style) path to the .dot file,
    or None on failure.  FK edges come from each table's
    TABLE_REF_FK_<table> reference table (row[4] -> row[2]).
    """
    checkData()
    try:
        nodos = []
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            if not os.path.isdir("./Data/Grafos/"):
                os.mkdir("./Data/Grafos/")
            f= open('./Data/Grafos/'+database+'.dot', 'w',encoding='utf-8')
            f.write("digraph dibujo{\n")
            f.write('graph [ordering="out"];')
            f.write('rankdir=TB;\n')
            f.write('node [shape = box];\n')
            # Use the database's primary engine to read the FK reference tables.
            mode = ExtractModeDatabase(db)
            tablas = showTables(database)
            for tab in tablas:
                rows = mode.extractTable(database,"TABLE_REF_FK_"+tab)
                if rows:
                    for row in rows:
                        if row[2] not in nodos:
                            f.write(row[2]+' [label = '+row[2]+', fontsize="30", shape = box ];\n')
                            nodos.append(row[2])
                        if row[4] not in nodos:
                            f.write(row[4]+' [label = '+row[4]+', fontsize="30", shape = box ];\n')
                            nodos.append(row[4])
                        f.write(row[4]+'->'+ row[2]+';\n')
                else:
                    # No FK entries: emit the table as an isolated node.
                    if tab not in nodos:
                        nodos.append(tab)
                        f.write(tab+' [label = '+tab+', fontsize="30", shape = box ];\n')
            f.write('}')
            f.close()
            os.system('dot -Tpng ./Data/Grafos/'+database+'.dot -o '+database+'.png')
            return os.getcwd()+"\\Data\\Grafos\\"+database+".dot"
        return None
    except:
        return None
def graphDF(database: str, table: str) -> str:
    """Render a table's functional-dependency graph with Graphviz.

    Primary-key columns come from tab[3]; unique columns from the
    TABLE_REF_INDEXU_<table> reference table; remaining column indices
    (0..tab[2]-1) are ordinary attributes that depend on the keys.
    Writes ./Data/Grafos/<db>_<table>_DF.dot, shells out to `dot` for a
    PNG, and returns the (Windows-style) .dot path, or None on failure.
    """
    checkData()
    try:
        data = Serializable.Read('./Data/',"Data")
        db = data.get(database.upper())
        if db:
            dataTable = Serializable.Read('./Data/',"DataTables")
            tab = dataTable.get(database.upper()+"_"+table.upper())
            if tab:
                database = db[0]
                table = tab[0]
                if not os.path.isdir("./Data/Grafos/"):
                    os.mkdir("./Data/Grafos/")
                f= open('./Data/Grafos/'+database+"_"+table+'_DF.dot', 'w',encoding='utf-8')
                f.write("digraph dibujo{\n")
                f.write('graph [ordering="out", ranksep = 5, nodesep = 0.5];')
                f.write('rankdir=TB;\n')
                f.write('node [shape = record];\n')
                # Use the database's primary engine to read the unique-index catalog.
                mode = ExtractModeDatabase(db)
                rows = mode.extractTable(database,"TABLE_REF_INDEXU_"+table)
                primarias = tab[3]
                unicas = []
                normales = []
                for x in primarias:
                    f.write(str(x)+' [label = "Primary|'+str(x)+'", fontsize="30", fillcolor = white, style = filled];\n')
                if len(rows):
                    for row in rows:
                        for x in row[3]:
                            if x not in unicas and x not in primarias:
                                f.write(str(x)+' [label = "Unique|'+str(x)+'", fontsize="30", fillcolor = white, style = filled];\n')
                                unicas.append(x)
                # Every other column index is an ordinary attribute node.
                for y in range(tab[2]):
                    if y not in unicas and y not in primarias:
                        f.write(str(y)+' [label = '+str(y)+', fontsize="30", shape = box ];\n')
                        normales.append(y)
                # Key and unique columns determine the ordinary ones.
                for p in primarias:
                    for n in normales:
                        f.write(str(p)+'->'+ str(n)+';\n')
                for p in unicas:
                    for n in normales:
                        f.write(str(p)+'->'+ str(n)+';\n')
                f.write('}')
                f.close()
                os.system('dot -Tpng ./Data/Grafos/'+database+"_"+table+'_DF.dot -o '+database+'_'+table+'_DF.png')
                return os.getcwd()+"\\Data\\Grafos\\"+database+"_"+table+"_DF.dot"
        return None
    except:
        return None
def ExtractModeDatabase(data):
    """Return the storage-engine module for the database's primary mode.

    `data` is a database catalog entry; data[1][0] names the primary mode.
    Returns None for an unrecognized mode, matching the original
    fall-through behavior.
    """
    engines = {
        'avl': avl,
        'b': b,
        'bplus': bplus,
        'dict': dict,
        'isam': isam,
        'json': json,
        'hash': hash,
    }
    return engines.get(data[1][0])
| [
"pascodom@gmail.com"
] | pascodom@gmail.com |
e2e24e924dd08430e582554e7321d4125ec6c862 | c40e84f6ca54fd85fc4f91740f6d35b9e693584a | /LeetCode/Python/073 Set Matrix Zeroes.py | 2626b6ad540243c985a9763a59b8dc676a17801a | [] | no_license | arif-hanif/Algorithm | 8b4d7b7e1c32524558f35bcca2f70b6283b16370 | 84b5be24f7b083b6fab6228a49eb279ab764ccda | refs/heads/master | 2021-01-15T16:42:29.079179 | 2016-09-10T11:32:25 | 2016-09-10T11:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | # -*- coding: utf-8 -*-
'''
Set Matrix Zeroes
=================
Given a m x n matrix, if an element is 0, set its entire row and column to 0.
Do it in place.
Follow up:
Did you use extra space?
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
class Solution(object):
    '''Approach: record every row and column that contains a zero, then
    sweep the matrix again, blanking each recorded row and column.
    (Docstring translated from Chinese.)
    '''
    def setZeroes(self, matrix):
        """Zero out, in place, every row and column that contains a 0.

        matrix: list of equal-length lists (m x n). Returns None.
        """
        if not matrix:
            return
        rows, cols = set(), set()
        for i, row in enumerate(matrix):
            for j, v in enumerate(row):
                if v == 0:
                    rows.add(i)
                    cols.add(j)
        # range() replaces the Python-2-only xrange() so this runs on Python 3.
        for r in rows:
            for j in range(len(matrix[0])):
                matrix[r][j] = 0
        for c in cols:
            for i in range(len(matrix)):
                matrix[i][c] = 0
| [
"shiyanhui66@gmail.com"
] | shiyanhui66@gmail.com |
b6482123aff9a7f3534d1b54a7a1b44d4566812b | 60715c9ea4c66d861708531def532814eab781fd | /python-programming-workshop/test/pythondatastructures/pythonbuiltinds/list_comprehensionn/listcompmorecomplicated.py | 73a0917342bf4c8b19047cc746f0628e88fafa51 | [] | no_license | bala4rtraining/python_programming | 6ce64d035ef04486f5dc9572cb0975dd322fcb3e | 99a5e6cf38448f5a01b310d5f7fa95493139b631 | refs/heads/master | 2023-09-03T00:10:26.272124 | 2021-11-01T08:20:52 | 2021-11-01T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py |
# Pair every even x in [0, 5) with every odd y in [0, 5),
# written as explicit nested loops instead of a comprehension.
result = []
for x in range(5):
    if x % 2 != 0:
        continue
    for y in range(5):
        if y % 2 == 1:
            result.append((x, y))
print(result)
| [
"karthikkannan@gmail.com"
] | karthikkannan@gmail.com |
058b037d0b0d3b509ff76eb7bf58363bb8ba915b | fc02b2f532fbf87243c3058862bc6efe4318cc4e | /openstack/tests.py | ad95fb3fe946d99168c1afce4323dbd80c17764f | [] | no_license | strongit/cmdb-2 | 26ed30314346674c4fe4c6e42f3cc7e09ad6f244 | 2d1f29ffc245ea56f77f64a2d97cf6d5e5a3e659 | refs/heads/master | 2021-01-14T10:18:39.616019 | 2016-07-23T09:30:05 | 2016-07-23T09:30:05 | 82,017,417 | 1 | 0 | null | 2017-02-15T03:41:38 | 2017-02-15T03:41:38 | null | UTF-8 | Python | false | false | 2,351 | py | from django.test import TestCase
from openstack import identityapi
# Create your tests here.
if __name__=='__main__':
print "test authenticate"
r=identityapi.authenticate('40.40.40.187',5000,'admin','admin','secrete')
print r['tokenId']
print r['expires']
print r['tenantId']
tokenid=r['tokenId']
#print r['serviceCatalog']
if r['serviceCatalog']['nova']:
uri=r['serviceCatalog']['nova']
#ss = nova.listServers(uri,tokenid)
#print nova.serverDetail(uri,ss[0]['id'],tokenid)
#fid = nova.createFlavor(uri,'api',1,64,64,1,tokenid)
#nova.deleteFlavor(uri,'f5a9eff3-3694-4090-8d37-c400bde82aaf',tokenid)
#ff = nova.listFlavors(uri,tokenid)
#for f in ff.items():
# print nova.flavorDetail(uri, f[0], tokenid)
#print nova.importKeypair(uri, 'test', 'blabla', tokenid)
#kk = nova.listKeypairs(uri,tokenid)
#print nova.keypairDetail(uri,kk[0]['name'],tokenid)
#nova.deleteKeypair(uri, kk[0]['name'], tokenid)
#print nova.createNetwork(uri, "test", "192.168.3.128/24", tokenid,dns1='114.114.114.114')
#nn = nova.listNetwork(uri, tokenid)
#print nova.networkDetail(uri, nn[0]['id'], tokenid)
#print nova.deleteNetwork(uri, nn[0]['id'], tokenid)
#print nova.reserveIp(uri, "192.168.3.150", tokenid)
#name='vm3'
#flavorid='3f09acbe-07c6-4280-b1d4-a38e0afc43ad'
#volumeid='52afc0dc-a0c5-47e6-b6b7-e65d31dc1dc7'
#networkid='4d95cd35-52a5-436d-8d9f-50d4f4273487'
#fixip='40.40.40.3'
#keyname='admin-key'
#print nova.createServer(uri,name,flavorid,volumeid,networkid,fixip,keyname,tokenid)
if r['serviceCatalog']['cinderv2']:
uri=r['serviceCatalog']['cinderv2']
#print cinder.createVolume(uri, 'vm3-vol', 10, tokenid,imageRef='b9df73e0-d5b1-44ac-8bdf-ec8863bd9874')
#cc = cinder.listVolumes(uri,tokenid)
#print cinder.volumeDetail(uri, cc[0]['id'], tokenid)
#print cinder.deleteVolume(uri, cc[0]['id'], tokenid)
if r['serviceCatalog']['glance']:
uri=r['serviceCatalog']['glance']
#print glance.createImage(uri, "test", "bare", "raw", tokenid)
#gg = glance.listImages(uri, tokenid)
#print gg[1]['id']
#print glance.ImageDetail(uri, gg[0]['id'], tokenid) | [
"qsm365@gmail.com"
] | qsm365@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.