Dataset schema (one row per source file; ranges are min-max, "N values" / "N classes" are distinct-value counts):

column                type           range / values
blob_id               stringlengths  40-40
directory_id          stringlengths  40-40
path                  stringlengths  2-616
content_id            stringlengths  40-40
detected_licenses     listlengths    0-69
license_type          stringclasses  2 values
repo_name             stringlengths  5-118
snapshot_id           stringlengths  40-40
revision_id           stringlengths  40-40
branch_name           stringlengths  4-63
visit_date            timestamp[us]
revision_date         timestamp[us]
committer_date        timestamp[us]
github_id             int64          2.91k-686M
star_events_count     int64          0-209k
fork_events_count     int64          0-110k
gha_license_id        stringclasses  23 values
gha_event_created_at  timestamp[us]
gha_created_at        timestamp[us]
gha_language          stringclasses  213 values
src_encoding          stringclasses  30 values
language              stringclasses  1 value
is_vendor             bool           2 classes
is_generated          bool           2 classes
length_bytes          int64          2-10.3M
extension             stringclasses  246 values
content               stringlengths  2-10.3M
authors               listlengths    1-1
author_id             stringlengths  0-212
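Rows follow in schema order, one field per line. A minimal sketch of consuming rows with this schema, assuming the dump comes from a Hugging Face dataset; the dataset path "org/python-code-dump" is a placeholder, not the real name:

from datasets import load_dataset

# stream rather than download: content fields can be up to 10.3 MB each
ds = load_dataset("org/python-code-dump", split="train", streaming=True)
for row in ds:
    # skip vendored or generated files, mirroring the is_vendor /
    # is_generated flags in the schema above
    if row["is_vendor"] or row["is_generated"]:
        continue
    print(row["repo_name"], row["path"], row["length_bytes"])
    break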
0783edba12e310cd16522a7749480e2ed5a2ae59
b9e7394c681004ea0b6c54d0c7c5cd79f9d33221
/practice/prac_app/forms.py
61665cb70de4bb0cc0b0585dc20f59fd0a8fcd87
[]
no_license
Rashika233/Django-Project
f4e32bfa7473ad2816c2601acff1c9249b3964ef
92bc8dda8a987eb7594405c6fcc9693683d5568a
refs/heads/master
2023-03-03T14:43:16.159779
2021-02-11T21:47:37
2021-02-11T21:47:37
338,161,201
0
0
null
null
null
null
UTF-8
Python
false
false
281
py
# import form class from django
from django import forms

# import GeeksModel from models.py
from .models import GeeksModel


# create a ModelForm
class GeeksForm(forms.ModelForm):
    # specify the name of model to use
    class Meta:
        model = GeeksModel
        fields = "__all__"
[ "rashika233" ]
rashika233
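The forms.py record above builds a ModelForm with fields = "__all__", which mirrors every field of the referenced model. The actual GeeksModel is not part of this dump; a hypothetical sketch of what it could look like (both fields are assumptions):

from django.db import models


class GeeksModel(models.Model):
    title = models.CharField(max_length=200)  # assumed field
    description = models.TextField()          # assumed field

    def __str__(self):
        return self.title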
49d638d0ac51b587f37b6c70adc62984e331e32f
c06157aacc5bb1b0f93f7541be8523d8c8cc9dd8
/rgs/data/wine_quality.py
35f1ce72c2e8adf61952239cd6288c25bd3a9ff3
[]
no_license
AliD101v/ml-course-project-f19
361a5227e1b6fab7f8ede83f5d6bfdfac94b04bf
1b3bfa57c8a0c0eb9121ef29c5cb13fbf0697402
refs/heads/master
2020-08-21T08:05:35.420629
2019-12-07T06:17:13
2019-12-07T06:17:13
216,116,059
0
0
null
null
null
null
UTF-8
Python
false
false
701
py
import numpy as np
import pandas as pd


def load_wine_quality():
    filePath_red = "C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Wine Quality/winequality-red.csv"
    filePath_white = "C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Wine Quality/winequality-white.csv"

    data_csv_red = pd.read_csv(filePath_red, delimiter=";")
    data_csv_white = pd.read_csv(filePath_white, delimiter=";")

    # the first 11 columns are features, the 12th is the quality label
    X_red = data_csv_red.iloc[:, :11]
    y_red = data_csv_red.iloc[:, 11]
    X_white = data_csv_white.iloc[:, :11]
    y_white = data_csv_white.iloc[:, 11]

    # DataFrame.append returns a new object; the original discarded the
    # result, so only the red-wine rows were ever returned
    X = X_red.append(X_white)
    y = y_red.append(y_white)
    return X, y
[ "sidhant.gupta.hsp@gmail.com" ]
sidhant.gupta.hsp@gmail.com
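DataFrame.append (used in the record above, which dates from 2019) was deprecated in pandas 1.4 and removed in 2.0; on current pandas the concatenation is written with pd.concat. A small self-contained equivalent:

import pandas as pd

X_red = pd.DataFrame({"fixed acidity": [7.4], "alcohol": [9.4]})
X_white = pd.DataFrame({"fixed acidity": [7.0], "alcohol": [8.8]})

# equivalent to X_red.append(X_white) on pandas >= 2.0, where
# DataFrame.append no longer exists
X = pd.concat([X_red, X_white], ignore_index=True)
print(X)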
5b3a2b73f856ba447fc17847187efdaf01f14f1b
0a2231f4eedef4a7fda1f14d5d13846529dc7cf7
/src/tweets/migrations/0001_initial.py
8c113d2ab376bd5d15669f2a850188789433ebeb
[]
no_license
kevivr/tweetme
f8b179360a5266c7487734eb4c8c21ffaf3cc7d3
8df56fe9cd7ff04233ae6e37aa151e649c410a60
refs/heads/master
2022-11-23T01:50:01.449426
2019-12-19T20:13:41
2019-12-19T20:13:41
197,088,156
0
1
null
2022-11-20T03:07:14
2019-07-16T00:00:53
Python
UTF-8
Python
false
false
539
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-07-16 20:42
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Tweet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
            ],
        ),
    ]
[ "vivek.raju@venmo.com" ]
vivek.raju@venmo.com
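The initial migration above corresponds to a model along these lines, reconstructed from the CreateModel operation rather than taken from the repo:

from django.db import models


class Tweet(models.Model):
    # Django adds the AutoField primary key implicitly; the generated
    # migration spells it out explicitly
    content = models.TextField()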
15f8abcda943c7118eff486ce22e593b97c7cb75
55fbc3e07511a1596c365923fc1b4b3de2705e5b
/addons/source-python/plugins/map_cycle/map_cycle.py
e3a39b55708061402bad1b2176c91c4dc4404c37
[]
no_license
Hackmastr/sp-map-cycle
5ed3ec7c2d9f5b35ffc80ded2cfbf3e5199cf8ef
af9871e19a749319d860aebe9dbf5e705ff16f6b
refs/heads/master
2021-01-24T08:09:26.811840
2016-09-25T13:39:45
2016-09-25T13:39:45
69,148,951
0
0
null
2016-09-25T07:01:58
2016-09-25T07:01:58
null
UTF-8
Python
false
false
31,568
py
from datetime import datetime from json import dump as json_dump, load as json_load from random import shuffle from time import time from warnings import warn from colors import Color from core import echo_console from cvars import ConVar from events import Event from engines.server import engine_server, global_vars from entities.entity import Entity from filters.players import PlayerIter from listeners import ( OnClientActive, OnClientDisconnect, OnLevelInit, OnLevelShutdown) from listeners.tick import Delay, GameThread from loggers import LogManager from memory import get_virtual_function from memory.hooks import PreHook from menus import PagedMenu, PagedOption from messages import HudMsg from paths import CFG_PATH, GAME_PATH from players.entity import Player from stringtables.downloads import Downloadables from spam_proof_commands.say import SayCommand from spam_proof_commands.server import ServerCommand from .info import info from .mc_commands import mc_commands from .classes.keyhint_progress import keyhint_progress from .classes.server_map import ( map_cycle_extend_entry, map_cycle_whatever_entry, map_manager) from .classes.session_user import session_user_manager from .classes.user import broadcast, tell, user_manager from .models.server_map import ServerMap as DB_ServerMap from .namespaces import popups, status from .resource.config_cvars import ( config_manager, cvar_logging_areas, cvar_logging_level, cvar_scheduled_vote_time, cvar_timelimit) from .resource.sqlalchemy import Base, engine, Session from .resource.strings import strings_common, strings_popups Base.metadata.create_all(engine) NEXTMAP_MSG_COLOR = Color(124, 173, 255) NEXTMAP_MSG_X = -1 NEXTMAP_MSG_Y = 0.05 NEXTMAP_MSG_EFFECT = 2 NEXTMAP_MSG_FADEIN = 0.05 NEXTMAP_MSG_FADEOUT = 0 NEXTMAP_MSG_HOLDTIME = 10 NEXTMAP_MSG_FXTIME = 0 # Used for SpamProofCommands ANTI_SPAM_TIMEOUT_SERVER = 1.0 ANTI_SPAM_TIMEOUT_PLAYER = 1.0 # mapcycle_default.txt - in case we don't find mapcyclefile DEFAULT_MAPCYCLEFILE = "mapcycle_default.txt" # mapcycle.json MAPCYCLE_JSON_FILE = GAME_PATH / 'cfg' / "mapcycle.json" # List of files to upload to players DOWNLOADLIST = CFG_PATH / info.basename / "downloadlist.txt" # Fallback value if either mc_timelimit or mp_timelimit were invalid # Take a look at load() NEGATIVE_TIMELIMIT_FALLBACK_VALUE = 60.0 # Fallback value if mc_scheduled_vote_time exceeds or equals to mc_timelimit. # Current mc_timelimit value will be multiplied by this fallback value # and then applied to mc_scheduled_vote_time. # Take a look at schedule_vote() INVALID_SCHEDULED_VOTE_TIME_FALLBACK_VALUE = 0.33 # We will add extra seconds after vote ends to prevent # instant level changing when the vote ends EXTRA_SECONDS_AFTER_VOTE = 5.0 cvar_mapcyclefile = ConVar('mapcyclefile') cvar_mp_timelimit = ConVar('mp_timelimit') # While sound cvars may be changed in real time, we only detect # files to upload to players on plugin load downloadables = Downloadables() with open(str(DOWNLOADLIST)) as f: # TODO: Do we need str() here? 
for line in f: line = line.strip() if not line: continue downloadables.add(line) log = LogManager(info.basename, cvar_logging_level, cvar_logging_areas) class CorruptJSONFile(Exception): """Raised when mapcycle.json doesn't contain a list.""" pass class InvalidMapJSON(Warning): """Used to warn and skip a single map in mapcycle.json.""" pass class InvalidCVarValue(Warning): """Used to warn improper configuration.""" pass class PlayersCannotVote(Warning): """Used to warn cases when there're no maps to vote for.""" pass mapcycle_json = None def reload_mapcycle_json(): if not MAPCYCLE_JSON_FILE.isfile(): raise FileNotFoundError("Missing mapcycle.json") global mapcycle_json with open(str(MAPCYCLE_JSON_FILE)) as f: # TODO: Do we need str() here? mapcycle_json = json_load(f) def build_json_from_mapcycle_txt(): mapcycle_txt = GAME_PATH / 'cfg' / cvar_mapcyclefile.get_string() if not mapcycle_txt.isfile(): mapcycle_txt = GAME_PATH / 'cfg' / DEFAULT_MAPCYCLEFILE if not mapcycle_txt.isfile(): raise FileNotFoundError("Missing {}".format(DEFAULT_MAPCYCLEFILE)) rs = [] with open(mapcycle_txt) as f: for line in f: line = line.strip() if not line: continue if line.startswith('//'): continue rs.append({ 'filename': line, }) with open(MAPCYCLE_JSON_FILE, 'w') as f: json_dump(rs, f, indent=4) def load_maps_from_db(): session = Session() for db_server_map in session.query(DB_ServerMap).all(): map_ = map_manager.get(db_server_map.filename) if map_ is None: continue map_.in_database = True map_.detected = db_server_map.detected map_.force_old = db_server_map.force_old map_.likes = db_server_map.likes map_.dislikes = db_server_map.dislikes session.close() def save_maps_to_db(): detected = int(time()) session = Session() for server_map in map_manager.values(): db_server_map = session.query(DB_ServerMap).filter_by( filename=server_map.filename.lower()).first() if db_server_map is None: server_map.detected = detected server_map.in_database = True db_server_map = DB_ServerMap() db_server_map.filename = server_map.filename.lower() db_server_map.detected = detected session.add(db_server_map) db_server_map.force_old = server_map.force_old db_server_map.likes = server_map.likes db_server_map.dislikes = server_map.dislikes db_server_map.man_hours = server_map.man_hours db_server_map.av_session_len = server_map.av_session_len session.commit() session.close() def reload_map_list(): if not isinstance(mapcycle_json, list): raise CorruptJSONFile("Parsed object is not a list") # Check if vote has not started yet - useful to prevent things from # getting dirty because of 'mc reload-mapcycle' if status.vote_status != status.VoteStatus.NOT_STARTED: raise RuntimeError("Vote has already started or even ended, " "can't execute reload_map_list() now") map_manager.clear() for i, json_dict in enumerate(mapcycle_json): try: filename = json_dict['filename'] except KeyError: warn(InvalidMapJSON("Map #{}: missing 'filename' key")) continue if filename.lower() in map_manager: warn(CorruptJSONFile("Duplicate maps '{}'".format(filename))) continue if engine_server.is_map_valid(filename): map_manager.create(json_dict) log.log_debug("Added {} valid maps".format(len(map_manager))) # Now rebuild nomination menu def select_callback(popup, index, option): user = user_manager[index] user.nominate_callback(option.value) popups.popup_nominate = PagedMenu(select_callback=select_callback, title=strings_popups['nominate_map']) server_maps = list(map_manager.values()) for server_map in sorted( server_maps, key=lambda server_map: server_map.filename): 
selectable = not server_map.played_recently popups.popup_nominate.append(PagedOption( text=server_map.name, value=server_map, highlight=selectable, selectable=selectable)) log.log_debug( "Added {} maps to the !nominate menu".format(len(server_maps))) def reload_maps_from_mapcycle(): # Load JSON try: # Try to load mapcycle.json reload_mapcycle_json() log.log_debug("Loaded mapcycle.json (first try)") except FileNotFoundError: # If it fails, build mapcycle.json from the mapcyclefile file build_json_from_mapcycle_txt() # And then load mapcycle.json again, this time it # must succeed reload_mapcycle_json() log.log_debug("Loaded mapcycle.json (after building it)") # Create MapCycleMap list using loaded JSON reload_map_list() # Fill maps properties with data from the database load_maps_from_db() mp_timelimit_old_value = 0 def load(): log.log_debug("Entered load()...") # Hot plug: detect users for player in PlayerIter('human'): user_manager.create(player) reload_maps_from_mapcycle() log.log_debug("Reloaded map list from JSON") # Save old mp_timelimit value global mp_timelimit_old_value mp_timelimit_old_value = cvar_mp_timelimit.get_float() # If mc_timelimit equals to -1, grab the value from mp_timelimit if config_manager['timelimit'] < 0: cvar_timelimit.set_float(mp_timelimit_old_value) log.log_debug( "mc_timelimit was -1, set to the value of mp_timelimit " "(mp_timelimit = {})".format(mp_timelimit_old_value)) if mp_timelimit_old_value < 0: warn(InvalidCVarValue( "mp_timelimit is negative, can't grab value from it")) cvar_timelimit.set_float(NEGATIVE_TIMELIMIT_FALLBACK_VALUE) # We don't need mp_timelimit to change the maps for us cvar_mp_timelimit.set_float(0.0) # Also mark current level name (if it's loaded) as a recently played if global_vars.map_name: map_name = global_vars.map_name map_manager.recent_map_names.append(global_vars.map_name) log.log_debug("Current level name is {}".format(map_name)) status.current_map = map_manager.get(map_name) if status.current_map is None: log.log_debug("Current map '{}' is not " "from mapcycle.json!".format(map_name)) # We think that the level is loaded with us status.map_start_time = time() log.log_debug("Level start time: {}".format( datetime.fromtimestamp( status.map_start_time ).strftime('%X'))) # Schedule the vote, it will be scheduled as if the map is loaded # with us schedule_vote() # Schedule level changing - this can be later cancelled # by map extensions schedule_change_level() # ... chat message broadcast(strings_common['loaded']) def unload(): log.log_debug("Entered unload()...") # Restore mp_timelimit to its original (or changed) value cvar_mp_timelimit.set_float(mp_timelimit_old_value) # Update database save_maps_to_db() # ... chat message broadcast(strings_common['unloaded']) delay_scheduled_vote = None delay_changelevel = None delay_end_vote = None delay_likemap_survey = None def launch_vote(scheduled=False): if status.vote_status != status.VoteStatus.NOT_STARTED: return # TODO: Maybe put a warning or an exception here? 
log.log_debug("Launching the vote (scheduled={})".format(scheduled)) status.vote_status = status.VoteStatus.IN_PROGRESS status.vote_start_time = time() # Cancel any scheduled votes in case somebody called us directly if delay_scheduled_vote is not None and delay_scheduled_vote.running: delay_scheduled_vote.cancel() global delay_end_vote # We will assign to this later if delay_end_vote is not None and delay_end_vote.running: delay_end_vote.cancel() # Cancel likemap survey if delay_likemap_survey is not None and delay_likemap_survey.running: delay_likemap_survey.cancel() # And unsend that popup from all players popups.popup_likemap.close() # Reset maps map_cycle_whatever_entry.votes = 0 map_cycle_extend_entry.votes = 0 for server_map in map_manager.values(): server_map.votes = 0 server_map.nominations = 0 # Popup callback def select_callback(popup, index, option): # Increment votes counter for this map option.value.votes += 1 # KeyHint stats keyhint_progress.count_vote(option.value) # User callback user_manager[index].vote_callback(option.value) # Create new popup popups.popup_main = PagedMenu(select_callback=select_callback, title=strings_popups['choose_map']) # First of all, add "I Don't Care" option if it's enabled if config_manager['votemap_whatever_option']: # Add to the list popups.popup_main.append(PagedOption( text=map_cycle_whatever_entry.name, value=map_cycle_whatever_entry, )) # Only add "Extend this map..." option to scheduled votes if scheduled: # Decide if it's selectable and highlighted selectable = status.can_extend() # Add to the list popups.popup_main.append(PagedOption( text=map_cycle_extend_entry.name, value=map_cycle_extend_entry, highlight=selectable, selectable=selectable )) # Now to the actual maps # Count nominations for nominated_map in user_manager.get_nominated_maps(): nominated_map.nominations += 1 user_manager.reset_nominated_maps() server_maps = map_manager.values() # Filter hidden maps out server_maps = list(filter( lambda server_map: not server_map.is_hidden, server_maps)) if not server_maps: warn(PlayersCannotVote("Please add more maps to the server or " "reconfigure Map Cycle")) return # Do we need to do an initial alphabetic sort? 
if config_manager['alphabetic_sort_enabled']: # Sort by name (alphabetically) if config_manager['alphabetic_sort_by_fullname']: server_maps = sorted( server_maps, key=lambda server_map: server_map.name) else: server_maps = sorted( server_maps, key=lambda server_map: server_map.filename) else: # Shuffle shuffle(server_maps) # Now sort by rating (likes, likes - dislikes or likes:dislikes) if config_manager['likemap_enable']: server_maps = sorted( server_maps, key=lambda server_map: server_map.rating, reverse=True) # Now separate new and old maps server_maps = sorted( server_maps, key=lambda server_map: server_map.is_new, reverse=True) # Now sort by nominations server_maps = sorted( server_maps, key=lambda server_map: server_map.nominations, reverse=True) # Now put recently played maps to the end server_maps = sorted( server_maps, key=lambda server_map: server_map.played_recently) # Cap options if config_manager['votemap_max_options'] > 0: server_maps = server_maps[:config_manager['votemap_max_options']] # Fill popup with the maps for server_map in server_maps: # Add the map to the popup selectable = not server_map.played_recently popups.popup_main.append(PagedOption( text=server_map.full_caption, value=server_map, highlight=selectable, selectable=selectable )) log.log_debug("Added {} maps to the vote".format(len(server_maps))) # Send popup to players for user in user_manager.values(): user.send_popup(popups.popup_main) # Define vote end delay_end_vote = Delay(config_manager['vote_duration'], finish_vote) # Start KeyHintProgress keyhint_progress.start() # ... sound if config_manager['sound_vote_start'] is not None: config_manager['sound_vote_start'].play( *[user.player.index for user in user_manager.values()]) # ... chat message broadcast(strings_common['vote_started']) def finish_vote(): if status.vote_status != status.VoteStatus.IN_PROGRESS: return # TODO: Same, warning/exception may fit better here? 
log.log_debug("Finishing the vote...") status.vote_status = status.VoteStatus.ENDED # Delay might still be running if the vote finished prematurely if delay_end_vote is not None and delay_end_vote.running: delay_end_vote.cancel() if popups.popup_main is not None: popups.popup_main.close() popups.popup_main = None # Stop KeyHintProgress keyhint_progress.stop() # Recount votes to prevent reconnected players from messing things up # We only counted votes before to display them in KeyHint area for server_map in map_manager.values(): server_map.votes = 0 for voted_map in user_manager.get_voted_maps(): voted_map.votes += 1 user_manager.reset_voted_maps() server_maps = map_manager.values() server_maps = filter( lambda server_map: not server_map.is_hidden, server_maps) if status.can_extend(): candidate_maps = tuple(server_maps) + (map_cycle_extend_entry, ) else: candidate_maps = server_maps candidate_maps = sorted( candidate_maps, key=lambda server_map: server_map.votes, reverse=True) if not candidate_maps: # If there're no maps on the server, there's not much we can do log.log_debug("No maps to choose from in finish_vote()!") broadcast(strings_common['no_choice']) if delay_changelevel and delay_changelevel.running: log.log_debug("Cancelling change_level...") delay_changelevel.cancel() return # Leave only maps with max votes number result_maps = [] for server_map in candidate_maps: if server_map.votes == candidate_maps[0].votes: result_maps.append(server_map) # If you ever want to implement VIP/Premium features into # !rtv and keep it fair, here's the place: shuffle(result_maps) winner_map = result_maps[0] set_next_map(winner_map) # ... chat message if isinstance(winner_map, type(map_cycle_extend_entry)): log.log_debug("Winner map: extend-this-map option") status.used_extends += 1 broadcast( strings_common['map_extended'], time=config_manager['extend_time']) else: log.log_debug("Winner map: {}".format(winner_map.filename)) broadcast(strings_common['map_won'], map=winner_map.name) # ... sound if config_manager['sound_vote_end'] is not None: config_manager['sound_vote_end'].play( *[user.player.index for user in user_manager.values()]) def set_next_map(server_map): # First of all, check if we actually need to extend the current map if isinstance(server_map, type(map_cycle_extend_entry)): log.log_debug("Extending current level...") # Set NOT_STARTED state so that they can nominate maps and stuff status.vote_status = status.VoteStatus.NOT_STARTED # Reset RTV for each user user_manager.reset_rtv() # Cancel and relaunch delay_changelevel... global delay_changelevel if delay_changelevel is not None and delay_changelevel.running: delay_changelevel.cancel() schedule_change_level(was_extended=True) # ... and then cancel and schedule a new vote if delay_scheduled_vote is not None and delay_scheduled_vote.running: delay_scheduled_vote.cancel() schedule_vote(was_extended=True) return log.log_debug("Setting next map to {}...".format(server_map.filename)) # If we don't need to extend current map, set a new next map status.next_map = server_map def schedule_change_level(was_extended=False): # Do we even need to change levels? 
if config_manager['timelimit'] == 0: # If not, no reason to continue return log.log_debug( "Scheduling change_level (was_extended={})".format(was_extended)) if was_extended: seconds = config_manager['extend_time'] * 60 + EXTRA_SECONDS_AFTER_VOTE else: seconds = config_manager['timelimit'] * 60 + EXTRA_SECONDS_AFTER_VOTE global delay_changelevel delay_changelevel = Delay(seconds, change_level) status.map_end_time = time() + seconds log.log_debug("We will end the game in {} seconds.".format(seconds)) def schedule_vote(was_extended=False): # Do we even need scheduled votes? if config_manager['timelimit'] == 0: # If not, no reason to continue return log.log_debug( "Scheduling the vote (was_extended={})".format(was_extended)) # We need to decide if we schedule vote from round start or # from map extension if was_extended: # If from extension, then the total time left is in mc_extend_time # But we need to check that mc_scheduled_vote_time does not # exceed it if (config_manager['scheduled_vote_time'] >= config_manager['extend_time']): new_value = (config_manager['extend_time'] * INVALID_SCHEDULED_VOTE_TIME_FALLBACK_VALUE) warn(InvalidCVarValue( "mc_scheduled_vote_time exceeds or equals to mc_extend_time, " "falling back to {}".format(new_value))) cvar_scheduled_vote_time.set_float(new_value) # Calculate time to start the vote in seconds = (config_manager['extend_time'] * 60 - config_manager['scheduled_vote_time'] * 60 - config_manager['vote_duration']) else: # But if it's just a regular scheduled vote, then the total time left # is in mc_timelimit # But then again, we need to check mc_scheduled_vote_time against # mc_timelimit if (config_manager['scheduled_vote_time'] >= config_manager['timelimit']): new_value = (config_manager['timelimit'] * INVALID_SCHEDULED_VOTE_TIME_FALLBACK_VALUE) warn(InvalidCVarValue( "mc_scheduled_vote_time exceeds or equals to mc_timelimit, " "falling back to {}".format(new_value))) cvar_scheduled_vote_time.set_float(new_value) # Calculate time to start the vote in seconds = (config_manager['timelimit'] * 60 - config_manager['scheduled_vote_time'] * 60 - config_manager['vote_duration']) # Schedule the vote global delay_scheduled_vote delay_scheduled_vote = Delay(seconds, launch_vote, scheduled=True) log.log_debug("Scheduled vote starts in {} seconds".format(seconds)) # Schedule likemap survey if config_manager['likemap_survey_duration'] > 0: seconds = max(0, seconds - config_manager['likemap_survey_duration']) global delay_likemap_survey delay_likemap_survey = Delay(seconds, launch_likemap_survey) log.log_debug( "Scheduled likemap survey in {} seconds".format(seconds)) def change_level(round_end=False): if status.next_map is None: raise RuntimeError("It's already time to change the level, " "but next map is yet to be decided") if config_manager['instant_change_level'] or round_end: log.log_debug("Ending the game...") game_end_entity = Entity.find_or_create('game_end') game_end_entity.end_game() else: log.log_debug("Waiting for the round end to end the game...") status.round_end_needed = True if config_manager['timeleft_auto_lastround_warning']: broadcast(strings_common['timeleft_last_round']) def check_if_enough_votes(): for user in user_manager.values(): if user.voted_map is None: return finish_vote() def check_if_enough_rtv(): try: ratio = user_manager.count_rtv() except ZeroDivisionError: return if ratio >= config_manager['rtv_needed']: # Cancel change_level delay if any global delay_changelevel if delay_changelevel is not None and delay_changelevel.running: 
delay_changelevel.cancel() # Relaunch change_level delay seconds = config_manager['vote_duration'] + EXTRA_SECONDS_AFTER_VOTE delay_changelevel = Delay(seconds, change_level) launch_vote(scheduled=False) def launch_likemap_survey(): log.log_debug("Launching mass likemap survey") for user in user_manager.values(): reason = user.get_likemap_denial_reason() if reason is None: user.send_popup(popups.popup_likemap) @OnLevelInit def listener_on_level_init(map_name): log.log_debug( "Entered OnLevelInit listener (map_name={})...".format(map_name)) # Reset MapCycleUser instances user_manager.reset_users() # Cancel delays if any if delay_scheduled_vote is not None and delay_scheduled_vote.running: delay_scheduled_vote.cancel() global delay_changelevel # We will assign to this a few lines later if delay_changelevel is not None and delay_changelevel.running: delay_changelevel.cancel() if delay_end_vote is not None and delay_end_vote.running: delay_end_vote.cancel() # Reset Status status.vote_status = status.VoteStatus.NOT_STARTED status.next_map = None status.map_start_time = time() status.used_extends = 0 # Update database GameThread(target=save_maps_to_db).start() # Reload maps reload_maps_from_mapcycle() # Set current map in Status status.current_map = map_manager.get(map_name.lower()) if status.current_map is None: log.log_debug("Current map '{}' is not " "from mapcycle.json!".format(map_name)) # Unsend popups if popups.popup_main is not None: popups.popup_main.close() popups.popup_main = None # Add current map names to recent_map_names map_manager.recent_map_names.append(map_name) # And then cap recent_map_names map_manager.cap_recent_maps() log.log_debug( "Recent map names: {}".format(','.join(map_manager.recent_map_names))) # Schedule regular vote schedule_vote(was_extended=False) # Schedule level changing - this can be later cancelled by map extensions schedule_change_level(was_extended=False) @OnLevelShutdown def listener_on_level_shutdown(): log.log_debug("Entered OnLevelShutdown listener") # Calculate map ratings if status.current_map is not None: for rating in session_user_manager.get_map_ratings(): if rating == 1: status.current_map.likes += 1 elif rating == -1: status.current_map.dislikes += 1 session_user_manager.reset_map_ratings() for user in user_manager.values(): user_manager.delete(user.player.index) @Event('round_end') def on_round_end(game_event): if status.round_end_needed: log.log_debug("round_end event, time to change the level") change_level(round_end=True) status.round_end_needed = False @Event('cs_win_panel_match') def on_cs_win_panel_match(game_event): # Check if the vote is still in progress if status.vote_status == status.VoteStatus.IN_PROGRESS: log.log_debug("on_cs_win_panel_match: vote was still in " "progress, finishing") finish_vote() # Check if next map is decided if status.next_map is None: log.log_debug("on_cs_win_panel_match: no next_map defined!") return # Check if we need to show it on player screens if not config_manager['nextmap_show_on_match_end']: return # HudMsg hud_msg = HudMsg( strings_popups['nextmap_msg'].tokenize( map=status.next_map.name), color1=NEXTMAP_MSG_COLOR, x=NEXTMAP_MSG_X, y=NEXTMAP_MSG_Y, effect=NEXTMAP_MSG_EFFECT, fade_in=NEXTMAP_MSG_FADEIN, fade_out=NEXTMAP_MSG_FADEOUT, hold_time=NEXTMAP_MSG_HOLDTIME, fx_time=NEXTMAP_MSG_FXTIME ) hud_msg.send(*[user.player.index for user in user_manager.values()]) # SayText2 broadcast(strings_common['nextmap_msg'], map=status.next_map.name) @OnClientActive def listener_on_client_active(index): player = 
Player(index) if player.is_fake_client(): return user_manager.create(player) @OnClientDisconnect def listener_on_client_disconnect(index): if index in user_manager: user_manager.delete(index) if status.vote_status == status.VoteStatus.NOT_STARTED: check_if_enough_rtv() elif status.vote_status == status.VoteStatus.IN_PROGRESS: check_if_enough_votes() engine_server_changelevel = get_virtual_function(engine_server, 'ChangeLevel') @PreHook(engine_server_changelevel) def hook_on_pre_change_level(args): log.log_debug("Hooked ChangeLevel...") # Set our own next map if status.next_map is not None: args[1] = status.next_map.filename @ServerCommand(ANTI_SPAM_TIMEOUT_SERVER, 'mc') def command_on_mc(command): current_command = mc_commands['mc'] i = 0 for i in range(1, len(command)): next_command = current_command.registered_commands.get( command[i].lower()) if next_command is None: i -= 1 break current_command = next_command args = [] for j in range(i + 1, len(command)): args.append(command[j]) if current_command is mc_commands['mc']: echo_console("Unknown MC command. Type 'mc help' to get help") return current_command.callback(args) @ServerCommand(ANTI_SPAM_TIMEOUT_SERVER, 'mc_launch_vote') def command_on_mc_force_vote(command): if status.vote_status != status.VoteStatus.NOT_STARTED: echo_console("Can't launch the vote as it has " "already started or ended") return launch_vote(scheduled=False) echo_console("Map vote has been launched") @SayCommand(ANTI_SPAM_TIMEOUT_PLAYER, ('!votemap', 'votemap')) def say_command_on_votemap(command, index, team_only): user = user_manager[index] reason = user.get_vote_denial_reason() if reason is not None: tell(user.player, reason) return user.send_popup(popups.popup_main) @SayCommand(ANTI_SPAM_TIMEOUT_PLAYER, ('!nominate', 'nominate')) def say_command_on_nominate(command, index, team_only): user = user_manager[index] reason = user.get_nominate_denial_reason() if reason is not None: tell(user.player, reason) return user.send_popup(popups.popup_nominate) @SayCommand( ANTI_SPAM_TIMEOUT_PLAYER, ('!rtv', 'rtv', 'rockthevote', '!rockthevote')) def say_command_on_rtv(command, index, team_only): user_manager[index].rtv_callback() @SayCommand(ANTI_SPAM_TIMEOUT_PLAYER, '!likemap') def say_command_on_likemap(command, index, team_only): user = user_manager[index] reason = user.get_likemap_denial_reason() if reason is not None: tell(user.player, reason) return user.send_popup(popups.popup_likemap) @SayCommand(ANTI_SPAM_TIMEOUT_PLAYER, ('!nextmap', 'nextmap')) def say_command_on_nextmap(command, index, team_only): user = user_manager[index] user.nextmap_callback() @SayCommand(ANTI_SPAM_TIMEOUT_PLAYER, ('!timeleft', 'timeleft')) def say_command_on_nextmap(command, index, team_only): user = user_manager[index] user.timeleft_callback()
[ "kirill@mysnik.com" ]
kirill@mysnik.com
0d1867210ff294cb464b1787d4eb96f0ae28ac9a
a8efab6971f2c593b1b6b8779f8f95e5005af8bf
/5-15/20 王丽娟/1.py
909864f536fbf5cff6a5b509829399451acfa111
[]
no_license
xx299x/practice_pyecharts
eb5a30910e5ad33f5bd1a8a0dfa99bb4e4df8464
05fdac822b79c041ebcfc9a5e0bd1fc63fce7b2d
refs/heads/master
2022-11-16T01:57:11.361689
2020-06-17T09:06:07
2020-06-17T09:06:07
264,120,690
1
55
null
2020-06-17T09:06:08
2020-05-15T06:59:15
Jupyter Notebook
UTF-8
Python
false
false
346
py
from pyecharts.charts import Bar

bar = Bar()
bar.add_xaxis(["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"])
bar.add_yaxis("商家A", [5, 20, 36, 10, 75, 90])
# render() generates a local HTML file; by default it writes render.html
# to the current directory. A path can also be passed in, e.g.
# bar.render("mycharts.html")
bar.render()
[ "wlj" ]
wlj
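For context, the pyecharts v1 API that the import style above implies also supports chained calls and global options; a small sketch (chart data and title here are illustrative):

from pyecharts import options as opts
from pyecharts.charts import Bar

bar = (
    Bar()
    .add_xaxis(["衬衫", "羊毛衫", "雪纺衫"])
    .add_yaxis("商家A", [5, 20, 36])
    .set_global_opts(title_opts=opts.TitleOpts(title="Sales"))
)
bar.render("mycharts.html")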
73ff944adf0044010ff8aff804f42bff55d30406
b007d88e6726452ffa8fe80300614f311ae5b318
/LeetCode/top_questions_from_amzaon/three_sum_closest.py
1b89843bbcb4f6c768c3269f303df3485fcee5e0
[]
no_license
jinurajan/Datastructures
ec332b12b8395f42cb769e771da3642f25ba7e7f
647fea5d2c8122468a1c018c6829b1c08717d86a
refs/heads/master
2023-07-06T14:42:55.168795
2023-07-04T13:23:22
2023-07-04T13:23:22
76,943,162
0
0
null
null
null
null
UTF-8
Python
false
false
1,809
py
""" 3Sum Closest Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution. Example 1: Input: nums = [-1,2,1,-4], target = 1 Output: 2 Explanation: The sum that is closest to the target is 2. (-1 + 2 + 1 = 2). Constraints: 3 <= nums.length <= 10^3 -10^3 <= nums[i] <= 10^3 -10^4 <= target <= 10^4 """ class Solution: def threeSumClosest(self, nums: List[int], target: int) -> int: nums.sort() n = len(nums) diff = float("inf") for i in range(n): l = i + 1 r = n - 1 while l < r: sum_val = nums[i] + nums[l] + nums[r] if abs(target - sum_val) < abs(diff): diff = target - sum_val if sum_val > target: r -= 1 else: l += 1 if diff == 0: # found matching one break return target - diff class Solution1: def threeSumClosest(self, nums: List[int], target: int) -> int: nums.sort() n = len(nums) diff = float("inf") for i in range(n): for j in range(i+1, n): compliment = target - nums[i] - nums[j] hi = bisect_right(nums, compliment, j+1) lo = hi - 1 if hi < n and abs(compliment - nums[hi]) < abs(diff): diff = compliment - nums[hi] if lo > j and abs(compliment - nums[lo]) < abs(diff): diff = compliment - nums[lo] if diff == 0: break return target - diff
[ "jinu.p.r@gmail.com" ]
jinu.p.r@gmail.com
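A quick sanity check of the record above, exercising both classes against the example from the docstring (assumes the classes and imports are in scope):

# -1 + 2 + 1 = 2 is the closest achievable sum to target 1
assert Solution().threeSumClosest([-1, 2, 1, -4], 1) == 2
assert Solution1().threeSumClosest([-1, 2, 1, -4], 1) == 2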
f633dbcc7eef53a854a2e141672e28f1ab37b9de
5e3ebdb7e79f0c5e3a12350a673bbf7bbfd1db9f
/uglynumbers.py
896898e77cb7b5531d0ff2e6425f199425f9f876
[]
no_license
mragankyadav/LeetCode_Solved
bc9a851881bf46413967149db1900a450e68d4d3
a7fb9e33c949b2c01454dac72a7b49ff9c5a8842
refs/heads/master
2020-01-23T21:29:21.771168
2017-01-24T00:54:48
2017-01-24T00:54:48
74,701,981
0
0
null
null
null
null
UTF-8
Python
false
false
359
py
class Solution(object):
    def isUgly(self, num):
        """
        :type num: int
        :rtype: bool
        """
        # strip out all factors of 2, 3 and 5; floor division keeps this
        # correct on Python 3, where /= would produce floats
        while num > 0 and num % 2 == 0:
            num //= 2
        while num > 0 and num % 3 == 0:
            num //= 3
        while num > 0 and num % 5 == 0:
            num //= 5
        return num == 1
[ "mragank@tamu.edu" ]
mragank@tamu.edu
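A short usage check for the record above; the assertions restate the definition (an ugly number has no prime factors other than 2, 3 and 5):

s = Solution()
assert s.isUgly(6) is True     # 2 * 3
assert s.isUgly(8) is True     # 2^3
assert s.isUgly(14) is False   # contains the factor 7
assert s.isUgly(0) is False    # non-positive numbers are not ugly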
f0f09ab6b602a528bb64a79743ecc40eae73860b
5f789754182d84c17f6e9454f98bb8e6f2975244
/day25_reverseWordsInAString.py
25c7ee41101590ee2f5cf3203a603a92e41afac7
[]
no_license
jjcrab/code-every-day
fb0d218877bcd3a14c4ff16864a6545ddd9d8761
0a5b7af23cd8b5c3a2ede01f81d1c9e0e6bce59a
refs/heads/main
2023-07-03T12:46:53.391997
2021-08-10T05:26:34
2021-08-10T05:26:34
355,221,020
0
0
null
null
null
null
UTF-8
Python
false
false
761
py
# https://leetcode.com/problems/reverse-words-in-a-string/
# Given an input string s, reverse the order of the words.
# A word is defined as a sequence of non-space characters. The words in s
# will be separated by at least one space.
# Return a string of the words in reverse order concatenated by a single space.
# Note that s may contain leading or trailing spaces or multiple spaces
# between two words. The returned string should only have a single space
# separating the words. Do not include any extra spaces. Your reversed
# string should not contain leading or trailing spaces.


def reverseWords(s):
    # str.split() with no argument already drops leading, trailing and
    # repeated whitespace
    s_list = s.split()
    s_list.reverse()
    return " ".join(s_list)
    # return " ".join(reversed(s.split()))


print(reverseWords(' I love you2 '))
[ "lily_lijj@hotmail.com" ]
lily_lijj@hotmail.com
acce580ac004eef6ef6088ce14949b4a15714965
b45af7416f42c7c74e35cfe4757c511d4fae3e1a
/read.py
621de762b2ced9b7ba6137ce7fd2b857bef1e129
[]
no_license
xzifan/stopSmoking
e25e9a48f957c00ae55761546a905cc0a98ca1e1
dbe0d704a675e581bd4ffe28fceeb6bcdfe50b54
refs/heads/master
2022-01-30T23:38:59.904859
2019-05-02T11:00:20
2019-05-02T11:00:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,720
py
import sqlite3
import tweepy
import json
import time
import networkx as nx
import matplotlib.pyplot as plt

with open('./twitter_credentials.json') as cred_data:
    info = json.load(cred_data)

consumer_key = info['CONSUMER_KEY']
consumer_secret = info['CONSUMER_SECRET']
access_key = info['ACCESS_KEY']
access_secret = info['ACCESS_SECRET']

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

# read database
conn = sqlite3.connect("C:/tweets.db")
c = conn.cursor()
c.execute("SELECT user_id, screen_name, text FROM tweets where text like '%smoking%';")
ids = c.fetchall()
print(len(ids))
print(c.fetchmany(5))

G = nx.DiGraph()
for i in range(0, len(ids)):
    G.add_node(i, userID=ids[i][0], screen_name=ids[i][1], text=ids[i][2])

follow = []
for i in range(len(ids)):
    try:
        G.add_node(i, userID=ids[i][0])
        followers = []
        print("getting ", ids[i][1], "'s followers... ")
        # a = tweepy.Cursor(api.followers_ids, screen_name=ids[i][1]).items()
        for item in tweepy.Cursor(api.followers, screen_name=ids[i][1]).items():
            # print(page.id)
            followers.append(item.id)
        print("followers: ", len(followers))
        time.sleep(60)
        print("user ", i, " has ", len(followers), " followers")
        for idx in range(len(ids)):
            if ids[idx][0] in followers:
                # the original appended (id[0], ...), accidentally reading
                # the id() builtin; the intended pair is the two user ids
                follow.append((ids[idx][0], ids[i][0]))
                G.add_edge(idx, i)
        print("follow list: ", follow)
    except tweepy.TweepError as e:
        print(e.reason, " wait 60s ...")
        time.sleep(60)
        continue

print(follow)
nx.draw(G)
plt.show()
[ "13607960126@139.com" ]
13607960126@139.com
952edeb51f5505fff59069d05c4f5e34a8d4ac4c
4b7d56fe3f5b2f532172b110bc1b05ae36c84468
/deprecated/MissingRuns.py
682528312a91d5847d311b94bb8894b91770d3c7
[]
no_license
digideskio/WmAgentScripts
fdfc80dc3c967c7a61b5bd7ef1f37cf0aed17761
6ead6971043fb25c3c43a667a1879370e6b3a689
refs/heads/master
2021-04-29T00:21:22.216242
2016-12-26T10:34:15
2016-12-26T10:34:15
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,801
py
#!/usr/bin/env python
import urllib2, urllib, httplib, sys, re, os, json
from deprecated import phedexSubscription
from deprecated import dbsTest
from xml.dom.minidom import getDOMImplementation
from das_client import get_data

das_host = 'https://cmsweb.cern.ch'


# Return true if a run is not present in any of the output datasets of a
# request, false if it is present in at least one
def runsNotPresent(url, workflow):
    # the original called deprecated.dbsTest.* without importing the
    # `deprecated` package itself; the names imported above are used instead
    runWhitelist = dbsTest.getRunWhitelist(url, workflow)
    newRunWhiteList = []
    for run in runWhitelist:
        if not runNotinAllDatasets(url, run, workflow):
            newRunWhiteList.append(run)
            print run
    print newRunWhiteList


def runNotinAllDatasets(url, run, workflow):
    Datasets = phedexSubscription.outputdatasetsWorkflow(url, workflow)
    InputDataset = dbsTest.getInputDataSet(url, workflow)
    runInputDataset = runInDataset(url, run, InputDataset)
    if not runInputDataset:
        return True
    for dataset in Datasets:
        if runInDataset(url, run, dataset):  # the run is in at least one of the output datasets
            return True
    return False


def runInDataset(url, run, dataset):
    query = "file run=" + str(run) + " dataset=" + dataset
    output = {}
    das_data = get_data(das_host, query, 0, 0, 0)
    if isinstance(das_data, basestring):
        result = json.loads(das_data)
    else:
        result = das_data
    if result['status'] == 'fail':
        print 'DAS query failed with reason:', result['reason']
    else:
        if len(result['data']) > 0:
            return True
        else:
            return False


def main():
    url = 'cmsweb.cern.ch'
    args = sys.argv[1:]
    if not len(args) == 1:
        print "usage: runsMissing workflowname"
        sys.exit(0)
    workflow = args[0]
    runsNotPresent(url, workflow)
    sys.exit(0)


if __name__ == "__main__":
    main()
[ "jbadillo@cern.ch" ]
jbadillo@cern.ch
f6f4481d578ca779d5f10316a55f8922b3002683
ef6cbc6ed0884ff22c055840d45772b29baf8258
/Lab2/Backups/interface 2.py
60398ad0737672112c110173b5b0148e3f29d4cb
[]
no_license
dsamue/DD1334Databas
a51b704c4e14705267285856f68b3406667ef5b9
76a0d737a360ab5fc4df644be51ddfa49f4df5e5
refs/heads/master
2020-03-06T15:06:18.050397
2018-03-27T07:50:41
2018-03-27T07:50:41
126,948,830
1
0
null
null
null
null
UTF-8
Python
false
false
9,099
py
#!/usr/bin/python

# Lines that start as this one does are comments and have no effect on
# the execution of the program. (Most added by John Folkesson to help
# understand the code.)

# When you import the pgdb module, all the classes and functions in that
# module become available for you to use. For example you can now use the
# pgdb.connect() function to establish a connection to your copy of the
# database.

# Possibly helpful example code is found at:
# https://wiki.inf.ed.ac.uk/twiki/pub/DatabaseGroup/TeachingPythonPostgreGuide/dbexample.py.txt

import pgdb
from sys import argv


# we define a class that we will use in our main program
class DBContext:
    """DBContext is a small interface to a database that simplifies SQL.
    Each function gathers the minimal amount of information required and
    executes the query."""

    # we first define the class: its definitions, functions, attributes,
    # members ... whatever you like to call this stuff. Then way down at
    # the bottom of this file we will create one of these.

    # this __init__ operation is automatically called when the object is created.
    def __init__(self):
        # PG-connection setup
        print("AUTHORS NOTE: If you submit faulty information here, I am not responsible for the consequences.")
        # we need to call the connect function with the right parameters, some
        # of which we hard-code here, such as the host, and others we get from
        # the user via the built-in python function raw_input. All are stored
        # in a variable that we chose to call params.
        params = {'host': 'nestor2.csc.kth.se',
                  'user': raw_input("Username: "),
                  'database': raw_input("Database: "),
                  'password': raw_input("Password: ")}
        self.conn = pgdb.connect(**params)
        # Here we create an attribute of our class (DBContext) called
        # menu as a list of strings.
        self.menu = ["Select.", "Insert.", "Remove.", "Exit"]
        # Here we create a cursor (see chap 9) and
        # http://www.python.org/dev/peps/pep-0249/
        self.cur = self.conn.cursor()

    # Here we define a member function that we can later call repeatedly
    def print_menu(self):
        """Prints a menu of all functions this program offers. Returns the
        numerical correspondant of the choice made."""
        for i, x in enumerate(self.menu):
            print("%i. %s" % (i + 1, x))
        # this get_int function is defined below
        return self.get_int()

    def get_int(self):
        """Retrieves an integer from the user. If the user fails to submit
        an integer, it will reprompt until an integer is submitted."""
        while True:  # we go round here until we get to return (ie while True)
            # The try statement works as follows. First, the try clause (the
            # statement(s) between the try and except keywords) is executed.
            # If no exception occurs, the except clause is skipped and
            # execution of the try statement is finished. If an exception
            # occurs during execution of the try clause, the rest of the
            # clause is skipped. Then if its type matches the exception named
            # after the except keyword, the except clause is executed, and
            # then execution continues after the try statement. If an
            # exception occurs which does not match the exception named in
            # the except clause, it is passed on to outer try statements; if
            # no handler is found, it is an unhandled exception and execution
            # stops with a message as shown above.
            try:
                # an Error here (ie wrong input type) jumps to except
                choice = int(input("Choose: "))
                if 1 <= choice <= len(self.menu):
                    return choice
                # here we had a number but it was out of range
                print("Invalid choice.")
            except (NameError, ValueError, TypeError, SyntaxError):
                print("That was not a number, genious.... :(")

    # This function will be called if the user chooses select.
    def select(self):
        """Finds and prints tuples. Will query the user for the information
        required to identify a tuple. If the filter field is left blank, no
        filter will be used."""
        # raw_input returns the entire line entered at the prompt. The
        # split(",") method then creates a list of the relations (tables)
        # that you have separated by commas. The strip method just removes
        # the white space. So this line is read from right to left: first the
        # user input is parsed into a list of names, then x is set to the
        # list contents, incremented through the list, then the current x is
        # stripped and the words " natural join " are added to the long
        # string being defined and stored in the variable tables.
        tables = [x.strip() + " natural join " for x in raw_input("Choose table(s): ").split(",")]
        # Here we do some char pointer tricks to remove the extra
        # " natural join " (14 characters)
        tables[len(tables) - 1] = tables[len(tables) - 1][0:len(tables[len(tables) - 1]) - 14]
        # print the result to the screen
        print tables
        # here columns becomes the string that you type at the prompt for Choose columns.
        columns = raw_input("Choose column(s): ")
        print columns
        # list comprehension building a list ready to be reduced into a string.
        filters = raw_input("Apply filters: ")
        # This will set query to the long string "SELECT columns FROM tables
        # WHERE filters;". The %s indicate that a string from a variable will
        # be inserted there. Those string variables (actually expressions
        # here) are then listed at the end, separated by commas.
        # lambda is a python keyword for defining a function (here with a+b)
        # reduce is a python built-in way to call a function on a list
        # (iterable) (here each element of columns is taken as b in turn)
        # join is the python way to concatenate a list of strings
        try:
            query = """SELECT %s FROM %s%s;""" % (reduce(lambda a, b: a + b, columns),
                                                  "".join(tables),
                                                  "" if filters == "" else " WHERE %s" % filters)
        except (NameError, ValueError, TypeError, SyntaxError):
            print " Bad input."
            return
        print(query)
        # Here we do the select query at the cursor
        # No errors are caught so this crashes horribly on malformed queries
        self.cur.execute(query)
        # This function is defined below
        self.print_answer()

    # OK now you do the next two:
    def remove(self):
        """Removes tuples. Will query the user for the information required
        to identify a tuple. If the filter field is left blank, no filters
        will be used."""
        # OK, we need a table and a WHERE clause.
        table = raw_input("Choose table: ")
        print table
        filters = raw_input("Apply filters: ")
        print filters
        try:
            query = """DELETE FROM %s WHERE (%s);""" % (table, filters)
        except (NameError, ValueError, TypeError, SyntaxError):
            print " Bad input."
            return
        print query
        self.cur.execute(query)

    def insert(self):
        """Inserts tuples. Will query the user for the information required
        to create tuples."""
        # OK, so we need to know the table and the values.
        table = raw_input("Choose table: ")
        print table
        values = raw_input("Type value(s): ")
        print values
        try:
            query = """INSERT INTO %s VALUES (%s);""" % (table, values)
        except (NameError, ValueError, TypeError, SyntaxError):
            print " Bad input."
            return
        print query
        self.cur.execute(query)

    def exit(self):
        self.cur.close()
        self.conn.close()
        exit()

    def print_answer(self):
        # We print all the stuff that was just fetched.
        print("\n".join([", ".join([str(a) for a in x]) for x in self.cur.fetchall()]))

    # we call this below in the main function.
    def run(self):
        """Main loop. Will divert control through the DBContext as dictated
        by the user."""
        actions = [self.select, self.insert, self.remove, self.exit]
        while True:
            try:
                # So this is executed right to left: the print_menu function
                # is run first (defined above), then the return value is used
                # as an index into the list actions defined above, then that
                # action is called.
                actions[self.print_menu() - 1]()
                print
            except IndexError:
                # if somehow the index into actions is wrong we just loop back
                print("Bad choice")


# This strange looking line is what kicks it all off. So python reads until
# it sees this, then starts executing what comes after.
if __name__ == "__main__":
    db = DBContext()
    db.run()
[ "samuelsson87@hotmail.com" ]
samuelsson87@hotmail.com
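The queries in the record above interpolate raw user input straight into SQL, which the comments acknowledge crashes on malformed input; it is also an injection risk. A sketch of a parameterized alternative, assuming a DB-API 2.0 driver with pyformat paramstyle such as pgdb, and Python 2 to match the record (remove_by_id is an illustrative helper, not from the repo):

def remove_by_id(cur, table, row_id):
    # table names cannot be bound as query parameters, so `table` is
    # assumed to be validated against a whitelist beforehand; the value
    # is bound, letting the driver escape it
    query = "DELETE FROM %s WHERE id = %%(row_id)s;" % table
    cur.execute(query, {"row_id": row_id})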
52bd92309b601393a15a005282bf1640f7a7c6f3
87b395719cc11ce9fd1a280cd0716a31fb5b4188
/bgmi/__init__.py
fb5242968532f4778e0a916feb66e57767ad2fe4
[ "MIT" ]
permissive
awesome-archive/BGmi
5f9fde96677904807fe16cbf68f7c476223329a6
313f8fd879e9daa141e76dea195563a42bd66b81
refs/heads/master
2020-12-03T00:42:43.446389
2017-06-14T12:14:21
2017-06-14T12:14:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
101
py
# coding=utf-8
__author__ = 'RicterZ'
__email__ = 'ricterzheng@gmail.com'
__version__ = '1.3.0-beta'
[ "ricterzheng@gmail.com" ]
ricterzheng@gmail.com
bc51cf4ac7cf98eb4abd1c542716aa14f4c63b33
a81d21f98dd558416f8731f001cb8151d8309f4f
/interviewbit/interviewbit/stringm/justified_text.py
83236e4d76250c48e0b756f398a134e2b635a76d
[]
no_license
marquesarthur/programming_problems
1128c38e65aade27e2435f7987d7ee2b328fda51
2f7df25d0d735f726b7012e4aa2417dee50526d9
refs/heads/master
2022-01-25T18:19:02.575634
2022-01-18T02:07:06
2022-01-18T02:07:06
32,213,919
2
0
null
2020-10-13T01:29:08
2015-03-14T13:44:06
Python
UTF-8
Python
false
false
2,041
py
class Solution:
    def justifyLines(self, lines, L):
        result = []
        for line in lines:
            curr = len(" ".join(line))
            if len(line) == 1:
                result.append(line[0] + " " * (L - curr))
            else:
                to_all = (L - curr) // (len(line) - 1)
                additional = (L - curr) % (len(line) - 1)
                res = line.pop(0)
                for word in line:
                    res += " " * (to_all + 1)
                    if additional > 0:
                        res += " "
                        additional -= 1
                    res += word
                result.append(res)
        return result

    # @param A : list of strings
    # @param B : integer
    # @return a list of strings
    def fullJustify(self, A, B):
        lines = []
        partial = []
        for word in A:
            if len(" ".join(partial + [word])) <= B:
                partial.append(word)
            else:
                lines.append(partial)
                partial = [word]
        if partial:
            lines.append(partial)
        result = self.justifyLines(lines, B)
        return result


# print(Solution().fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16))
# print(Solution().fullJustify(["What", "must", "be", "shall", "be."], 12))
#
# A = [ "glu", "muskzjyen", "ahxkp", "t", "djmgzzyh", "jzudvh", "raji", "vmipiz", "sg", "rv", "mekoexzfmq", "fsrihvdnt", "yvnppem", "gidia", "fxjlzekp", "uvdaj", "ua", "pzagn", "bjffryz", "nkdd", "osrownxj", "fvluvpdj", "kkrpr", "khp", "eef", "aogrl", "gqfwfnaen", "qhujt", "vabjsmj", "ji", "f", "opihimudj", "awi", "jyjlyfavbg", "tqxupaaknt", "dvqxay", "ny", "ezxsvmqk", "ncsckq", "nzlce", "cxzdirg", "dnmaxql", "bhrgyuyc", "qtqt", "yka", "wkjriv", "xyfoxfcqzb", "fttsfs", "m" ]
# B = 144
#
# s = Solution().fullJustify(A, B)
# print(s[0] == "glu muskzjyen ahxkp t djmgzzyh jzudvh raji vmipiz sg rv mekoexzfmq fsrihvdnt yvnppem gidia fxjlzekp uvdaj ua pzagn bjffryz nkdd osrownxj")
[ "marques.art@gmail.com" ]
marques.art@gmail.com
c514cf956ce40ceda2afe2b4381d34f8b7f6f3b9
f6563f98fe519b95338fe5e741b7e85dbdd74c6e
/schedule/migrations/0002_talkschedule_day_session.py
4e212d87a7c09bf11fd505e7c0e284b8e5ee75ca
[]
no_license
PyZim/pyconzim_website
103a02327cbdf64ac635a0d89ef744dff826e2ed
6a6b4b85201a53202ab8ada3fb9d7e85115437da
refs/heads/master
2021-01-20T01:54:27.346355
2018-02-10T10:38:46
2018-02-10T10:38:46
89,343,552
3
6
null
2022-03-16T06:10:04
2017-04-25T09:39:39
JavaScript
UTF-8
Python
false
false
548
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-07-20 21:18
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('schedule', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='talkschedule',
            name='day_session',
            field=models.CharField(choices=[('Morning', 'Morning'), ('Afternoon', 'Afternoon'), ('Evening', 'Evening')], default='', max_length=10),
        ),
    ]
[ "amakarudze@gmail.com" ]
amakarudze@gmail.com
d43289e5edc8bd631adc373c22939e8abb133a42
6fcbf5644e61cebcdb0dbd92b517418eab5b0977
/teercontroller/teercontroller/__init__.py
f161d9ee983bd101692207aab734cd95a0f325dd
[]
no_license
maraid/teercontroller
18a6ad2f4fceec10b29720e39e1634bbb73a63dd
565c582accdd79a4e4bf007b7e2114cc64b9ab07
refs/heads/master
2020-03-08T06:30:53.522765
2019-06-24T21:22:03
2019-06-24T21:22:03
127,973,594
0
1
null
null
null
null
UTF-8
Python
false
false
45
py
from PhaseController import PhaseController
[ "danielmarai@yahoo.com" ]
danielmarai@yahoo.com
ae6a31af43390beff2a2eca53b5a0ffcc2ffd36f
d13c7805aa0e2d6ea110321770a29447b062cfd3
/accounts/views.py
5f319b5c2ab6247a46ffd303cf3a59c5a31ad08b
[]
no_license
josefinafortin/ICS491_Summer2019_TeamJS
5edbfc984ea33b87cb45c3bd69f47e0cd1c66f74
b9251684d1fea0af2f1f2d60db38f332c0ddeac4
refs/heads/master
2020-06-01T16:57:59.703194
2019-06-27T19:21:53
2019-06-27T19:21:53
190,857,621
0
0
null
null
null
null
UTF-8
Python
false
false
1,477
py
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required


# Create your views here.
def signup_view(request):
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            return redirect('accounts:userhome')
    else:
        form = UserCreationForm()
    return render(request, 'accounts/signup.html', {'form': form})


def login_view(request):
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            # log the user in
            user = form.get_user()
            login(request, user)
            return redirect('accounts:userhome')
    else:
        form = AuthenticationForm()
    return render(request, 'accounts/login.html', {'form': form})


# the original defined user_homepage twice; the second definition (which
# passes the username into the template context) shadowed the first, so
# only that version is kept
@login_required(login_url="/accounts/login")
def user_homepage(request):
    username = request.user
    context = {"Uname": username}
    return render(request, 'accounts/userhome.html', context)


def logout_view(request):
    if request.method == 'POST':
        logout(request)
        return redirect('accounts:login')
[ "noreply@github.com" ]
josefinafortin.noreply@github.com
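The views above redirect to the named routes accounts:userhome and accounts:login. A hypothetical accounts/urls.py wiring them up; the real file is not in this dump, so every path string here is an assumption:

from django.urls import path

from . import views

app_name = 'accounts'

urlpatterns = [
    path('signup/', views.signup_view, name='signup'),
    path('login/', views.login_view, name='login'),
    path('logout/', views.logout_view, name='logout'),
    path('userhome/', views.user_homepage, name='userhome'),
]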
7c73ad30514391476c8f79a8838afba38d502606
fc716c1605d22438fbf363fedb5be56f2e8cf9ce
/training/train.py
7f4b4db6eee2f99229ca44c679c4f1b0384677f8
[]
no_license
vimiomori/graduation-thesis
db12bc574d0e6a318c8b2f2e3e4f0f5e6bb304b0
bb91baf8817e070b50c19fc259f0381512ccb0d4
refs/heads/master
2023-01-24T06:17:42.055364
2020-12-01T02:21:05
2020-12-01T02:21:05
295,306,720
0
0
null
null
null
null
UTF-8
Python
false
false
1,589
py
# -*- coding: utf-8 -*-
import time
import csv
import pickle
import sys

from gensim.models import Word2Vec

from evaluate import get_score


def main(sg, window, size):
    start = time.time()
    print(">" * 10, "Preparing corpuses", "<" * 10)
    print("Loading 助詞あり corpus")
    with open('joshi_ari.pkl', 'rb') as f:
        ari_corpus = pickle.load(f)
    print("Loading 助詞なし corpus")
    with open('joshi_nashi.pkl', 'rb') as f:
        nashi_corpus = pickle.load(f)
    # print(f"Corpuses loaded, time elapsed: {((time.time() - start)/60):.2f} mins")

    with open("tuning_results.csv", "a", newline="") as f:
        wr = csv.writer(f)
        print("-" * 10, f"Beginning to train with params: window: {window}, size: {size}, sg: {sg}", "-" * 10)
        print("Training 助詞なし model")
        nashi_model = Word2Vec(nashi_corpus, window=window, size=size, sg=sg)
        print("Training 助詞あり model")
        ari_model = Word2Vec(ari_corpus, window=window, size=size, sg=sg)
        print("Calculating scores")
        ari_score, nashi_score = get_score(ari_model.wv, nashi_model.wv)
        print("*" * 30)
        print(f"Results: 助詞あり score: {ari_score}, 助詞なし score: {nashi_score}")
        print(f"Total elapsed time for this iteration: {((time.time() - start)/60):.2f} mins")
        print("*" * 30)
        wr.writerow([sg, window, size, ari_score, nashi_score])


if __name__ == "__main__":
    sg = int(sys.argv[1])
    window = int(sys.argv[2])
    size = int(sys.argv[3])
    main(sg, window, size)
[ "vivian.muchen@gmail.com" ]
vivian.muchen@gmail.com
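The script above takes its hyperparameters as three positional CLI arguments; a hedged usage sketch (the argument values are examples only):

# equivalent to: python train.py 1 5 100
# sg=1 selects skip-gram (0 would be CBOW), window is the context width,
# size is the embedding dimensionality. The `size` keyword matches
# gensim < 4.0, which this script assumes; gensim 4.x renamed it to
# `vector_size`.
main(sg=1, window=5, size=100)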
b372fec1a5affa6271d68a4e50411b92c0a41c40
6c4d230ff798de61ca7fd4d72b2a779e00506e0b
/pioneer/envs/pioneer/pioneer_env.py
41bc422a592237660428e9c0a23718c1e556a410
[ "MIT" ]
permissive
MrNikStr/pioneer
d69292bd112d39fb8bf5a286d053a913bfb929f9
0912fd7b569073721d15a3e9843f80bde6da6093
refs/heads/master
2022-11-23T16:45:55.105469
2020-07-24T15:28:41
2020-07-24T15:28:41
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,158
py
import math
import os
from dataclasses import dataclass
from time import sleep
from typing import Optional, Tuple, Dict, List

import numpy as np
from gym import spaces
from gym.utils import seeding

from pioneer.envs.bullet import BulletEnv, RenderConfig, SimulationConfig, Scene

Action = np.ndarray
Observation = np.ndarray


@dataclass
class PioneerConfig:
    velocity_factor: float = 0.2
    potential_scale: float = 5.0
    step_penalty: float = 1 / 125
    stop_distance: float = 0.1


class PioneerEnv(BulletEnv[Action, Observation]):
    def __init__(self,
                 headless: bool = True,
                 pioneer_config: Optional[PioneerConfig] = None,
                 simulation_config: Optional[SimulationConfig] = None,
                 render_config: Optional[RenderConfig] = None):
        model_path = os.path.join(os.path.dirname(__file__), 'assets/pioneer_6dof.urdf')
        BulletEnv.__init__(self, model_path, headless, simulation_config, render_config)

        self.config = pioneer_config or PioneerConfig()

        # TODO: self.action_space = self.joint_velocities_space(self.scene, self.config.velocity_factor)
        self.action_space = self.joint_positions_space(self.scene)
        self.observation_space = self.observation_to_space(self.observe())
        self.reward_range = (-float('inf'), float('inf'))

        self.np_random = None
        self.seed()

        self.best_distance = math.inf

    def seed(self, seed=None) -> List[int]:
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    # TODO:
    #
    # def act(self, action: Action) -> Tuple[float, bool, Dict]:
    #     pointer_coords = np.array(self.scene.items_by_name['robot:pointer'].pose().xyz)
    #     target_coords = np.array(self.scene.items_by_name['target'].pose().xyz)
    #     diff = target_coords - pointer_coords
    #     distance = np.linalg.norm(diff)
    #
    #     reward = 0
    #     if distance < self.best_distance:
    #         reward += self.potential(distance) - self.potential(self.best_distance)
    #         self.best_distance = distance
    #     reward -= self.config.step_penalty
    #
    #     done = distance < self.config.stop_distance
    #
    #     action_list = list(action)
    #     assert len(action_list) == len(self.scene.joints)
    #     for velocity, joint in zip(action_list, self.scene.joints):
    #         joint.control_velocity(velocity)
    #
    #     self.world.step()
    #     return reward, done, dict()

    def act(self, action: Action) -> Tuple[float, bool, Dict]:
        pointer_coords = np.array(self.scene.items_by_name['robot:pointer'].pose().xyz)
        target_coords = np.array(self.scene.items_by_name['target'].pose().xyz)
        diff = target_coords - pointer_coords
        distance = np.linalg.norm(diff)

        reward = -distance
        done = distance < self.config.stop_distance

        action_list = list(action)
        assert len(action_list) == len(self.scene.joints)
        for position, joint in zip(action_list, self.scene.joints):
            joint.control_position(position)

        self.world.step()
        return reward, done, dict()

    def observe(self) -> Observation:
        pointer_coords = np.array(self.scene.items_by_name['robot:pointer'].pose().xyz)
        target_coords = np.array(self.scene.items_by_name['target'].pose().xyz)
        diff = target_coords - pointer_coords
        distance = np.linalg.norm(diff)

        joint_positions = np.array([x.position() for x in self.scene.joints])
        joint_lower_limits = np.array([x.lower_limit for x in self.scene.joints])
        joint_upper_limits = np.array([x.upper_limit for x in self.scene.joints])

        return np.concatenate([
            joint_positions,
            joint_lower_limits,
            joint_upper_limits,
            joint_positions - joint_lower_limits,
            joint_upper_limits - joint_positions,
            np.cos(joint_positions), np.sin(joint_positions),
            np.cos(joint_lower_limits), np.sin(joint_lower_limits),
            np.cos(joint_upper_limits), np.sin(joint_upper_limits),
            pointer_coords,
            target_coords,
            diff,
            np.array([distance]),
        ])

    def potential(self, distance: float) -> float:
        return 1 / (distance + 1 / self.config.potential_scale)

    @staticmethod
    def joint_positions_space(scene: Scene) -> spaces.Space:
        lower_limits = np.array([x.lower_limit for x in scene.joints], dtype=np.float32)
        upper_limits = np.array([x.upper_limit for x in scene.joints], dtype=np.float32)
        return spaces.Box(low=lower_limits, high=upper_limits, dtype=np.float32)

    @staticmethod
    def joint_velocities_space(scene: Scene, velocity_factor: float) -> spaces.Space:
        lower_limits = np.array([x.lower_limit for x in scene.joints], dtype=np.float32)
        upper_limits = np.array([x.upper_limit for x in scene.joints], dtype=np.float32)
        distance = upper_limits - lower_limits
        velocity = distance * velocity_factor
        return spaces.Box(low=-velocity, high=velocity, dtype=np.float32)

    @staticmethod
    def observation_to_space(observation: Observation) -> spaces.Space:
        low = np.full(observation.shape, -float('inf'), dtype=np.float32)
        high = np.full(observation.shape, float('inf'), dtype=np.float32)
        return spaces.Box(low, high, dtype=observation.dtype)


if __name__ == '__main__':
    env = PioneerEnv(headless=False)
    env.reset_gui_camera()
    env.act(np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))

    # target_joint = env.scene.joints_by_name['robot:arm3_to_rotator3']
    # target_joint.control_velocity(velocity=1.0)

    while True:
        # rr = target_joint.upper_limit - target_joint.lower_limit
        # if target_joint.position() < target_joint.lower_limit + 0.01 * rr:
        #     target_joint.control_velocity(velocity=1.0)
        #
        # if target_joint.position() > target_joint.upper_limit - 0.01 * rr:
        #     target_joint.control_velocity(velocity=-1.0)

        env.world.step()
        sleep(env.world.timestep)
[ "alexander@fluence.one" ]
alexander@fluence.one
26b983bf3e2b84ea70ee0f38e0f4cf03b6302db5
c91d2b1de58ec41f0f91fe7e46f6fb952f5e9192
/time_track_weather/time_track_weather.py
55a505be3602407dcd012e4990f0a7165a47113f
[]
no_license
publicaldo/PiPlay
384e3ba494af24b396040431945918ed388f492b
cdd25ce8f1ae03830752ae0e7f2fe2f1c0539fb9
refs/heads/main
2023-05-23T12:01:17.630071
2021-02-19T08:26:13
2021-02-19T08:26:13
338,565,473
0
0
null
2023-05-02T10:03:14
2021-02-13T12:04:01
Python
UTF-8
Python
false
false
4,129
py
import sys
sys.path.append("lib")

# from waveshare_epd import epd4in2_V2  # pi only:
import os
from datetime import datetime
import requests

picdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pic")
icondir = os.path.join(picdir, "icon")
fontdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "font")

API_KEY = "13cd0998435bbe45bf1c548c70384176"
CITY = "Munich"
STATE = ""
COUNTRY = "DE"
LATITUDE = "48.137154"
LONGITUDE = "11.576124"


def get_date():
    now = datetime.now()
    current_date = now.strftime("%a, %b %d")
    return current_date


def get_time():
    now = datetime.now()
    current_time = now.strftime("%I:%M %p")
    return current_time


def get_weather():
    url = f"https://api.openweathermap.org/data/2.5/onecall?lat={LATITUDE}&lon={LONGITUDE}&appid={API_KEY}&units={'imperial'}"
    response = requests.get(url)
    if not response.ok:
        raise Exception(response.json()["message"])
    body = response.json()
    # this text works, but is a bit clumsy?
    current_temp = round(body["current"]["temp"])
    current_conditions = [condition["main"] for condition in body["current"]["weather"]][0]
    # I'd like to return a link to the icon? or specific font character for font.
    hi_today = [round(day["temp"]["max"]) for day in body["daily"]][0]
    lo_today = [round(day["temp"]["min"]) for day in body["daily"]][0]
    hi_tomorrow = [round(day["temp"]["max"]) for day in body["daily"]][1]
    lo_tomorrow = [round(day["temp"]["min"]) for day in body["daily"]][1]
    # conditions_tomorrow = [condition["main"] for condition in body["daily"]][1:2]
    # don't know how to get tomorrow's conditions
    return current_temp, current_conditions, hi_today, lo_today, hi_tomorrow, lo_tomorrow


def get_now_playing():
    url = f"http://hifiberry.local:81/api/track/metadata"
    response = requests.get(url)
    if not response.ok:
        raise Exception(response.json()["message"])
    body = response.json()
    title = body['title']
    artist = body['artist']
    releaseDate = body['releaseDate']
    # if releaseDate is None then make it ""
    # if releaseDate == None:
    #     releaseDate = ""
    playerName = body['playerName']
    if playerName == "mpd":
        playerName = "Radio"
    if playerName == "ShairportSync":
        playerName = "AirPlay"
    # There's got to be a case or list translate for this
    # also - if Radio, can we get the channel name?
    #      - maybe brute force it from the url
    return title, artist, releaseDate, playerName


# Daily calendar loop
# get formatted date and time
current_date = get_date()
current_time = get_time()
# at this time / in this loop for this development exercise
# update date section
# print(current_date)  # commented for this dev exercise
print(current_date, current_time, sep=' - ')  # separate time into minute loop after dev
print()

# 1 minute time loop
# get formatted time
# update time section
# print(current_time)

# 15 minute weather loop
# Get weather
(current_temp, current_conditions, hi_today, lo_today, hi_tomorrow, lo_tomorrow) = get_weather()
# update weather section
print("Weather")
print("Current: ", current_temp, "and", current_conditions)
print("Today: high", hi_today, "low", lo_today)
print("Tomorrow: high", hi_tomorrow, "low", lo_tomorrow)

# 5 second now playing loop
# get now playing
(title, artist, releaseDate, playerName) = get_now_playing()
# check if new - artist, title should be enough
# if yes, update song section
print()
print(playerName)
print(title, artist, releaseDate, sep=' | ')
# if releaseDate == "none": releaseDate = ""

# example timing and control loop from Python turtle clock example
# def tick():
#     t = datetime.today()
#     second = t.second + t.microsecond*0.000001
#     minute = t.minute + second/60.0
#     hour = t.hour + minute/60.0
#     try:
#         tracer(False)  # Terminator can occur here
#         writer.clear()
#         writer.home()
#
# def main():
#     tracer(False)
#     setup()
#     tracer(True)
#     tick()
#     return "EVENTLOOP"
#
# if __name__ == "__main__":
#     mode("logo")
#     msg = main()
#     print(msg)
#     mainloop()
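# --- Editor's sketch (not part of the original script) ---------------------
# The comments above ask for a table-driven player-name translation, a
# releaseDate fallback, and a way to read tomorrow's conditions. Assuming the
# OpenWeather "onecall" response shape already used in get_weather(), the
# daily conditions live under body["daily"][1]["weather"]. Names below are
# illustrative only.
PLAYER_NAMES = {"mpd": "Radio", "ShairportSync": "AirPlay"}

def translate_player(raw_name):
    # fall back to the raw name for players we have not mapped yet
    return PLAYER_NAMES.get(raw_name, raw_name)

def safe_release_date(raw_date):
    # normalise None / "none" to an empty string for display
    return "" if raw_date in (None, "none") else raw_date

def conditions_tomorrow(body):
    # assumption: body["daily"][1] carries the same "weather" list as "current"
    return body["daily"][1]["weather"][0]["main"]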
[ "76871019+publicaldo@users.noreply.github.com" ]
76871019+publicaldo@users.noreply.github.com
a1a2c4ef7285f0b3cfb5f92a0c22724e95a2474a
ca5afb3906fca1a9692891b23073e51b63dede5e
/NetMM/Server/Server.py
a819b47f1d04aca16e9e54d18c83e08b75ce7374
[]
no_license
hadarrabin/NetMangement-Final-version
87d44bd1b09eb86eaa5adcf341646affc6e08e3e
a0fb40d9c59284112e9120cffc18fb4e300b93f5
refs/heads/master
2021-01-01T05:10:06.213917
2016-05-29T17:00:00
2016-05-29T17:00:00
59,429,087
0
0
null
null
null
null
UTF-8
Python
false
false
10,423
py
__author__ = 'Hadar'
import socket
import Aes
import threading
from Rsa import *
from Crypto import *
import time
import pickle
from base64 import b64decode, b64encode
import time, sys
import struct
import pyodbc

# region ---------- C O N S T A N T S ------------------------------------------------------------
PORT = 5070
LEN_UNIT_BUF = 2048  # Min len of buffer for receive from server socket
MAX_ENCRYPTED_MSG_SIZE = 128
MAX_SOURCE_MSG_SIZE = 128
END_LINE = "\r\n"
LOCAL_ADDRESS = "0.0.0.0"
IF_CLIENT_NOT_CONNECTED = True


class server():
    def __init__(self, path):
        self.socket = socket.socket()
        self.client_keys = {}
        self.crypto = Crypto()
        self.f = open(r'\\.\pipe\myPipee', 'r+b', 0)
        self.dbcursor = self.connectdb(path)

    def connectdb(self, path):
        ACCESS_DATABASE_FILE = 'C:\\codeing\\NetMangerUp-master-master\\Server_Gui\\Server_Gui\\bin\\Debug\\ComputersBasicD.accdb'
        ODBC_CONN_STR = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;' % ACCESS_DATABASE_FILE
        """try:
            ACCESS_DATABASE_FILE = path + r'\ComputersBasicD.accdb'
            ODBC_CONN_STR = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;' % ACCESS_DATABASE_FILE
            conn = pypyodbc.connect(ODBC_CONN_STR)
            print 'Success connect to DB'
        except Exception,ex:
            print ex"""
        conn = pyodbc.connect(ODBC_CONN_STR)
        cur = conn.cursor()
        return cur

    def writeTGui(self, s):
        self.f.write(struct.pack('I', len(s)) + s)
        self.f.seek(0)

    def readFGui(self):
        n = struct.unpack('I', self.f.read(4))[0]
        s = self.f.read(n)
        self.f.seek(0)
        return s

    def key_exchange(self, client_socket):
        if self.crypto.private_key.can_encrypt():
            # -------------------- 1 ------------------------------------------------------------------------
            # ------------ Send server publicKey
            client_socket.send(pickle.dumps(self.crypto.private_key.publickey()) + END_LINE)
            time.sleep(0.5)
            # ----------- send Base64 Hash of self.crypto.private_key.publickey()
            client_socket.send(b64encode(SHA256.new(pickle.dumps(self.crypto.private_key.publickey())).hexdigest()) + END_LINE)
            time.sleep(0.5)

            # -------------------- 2 ------------------------------------------------------------------------
            # -------------- Wait client private key --------------------------------------------------------
            # get Pickled private key
            pickled_client_private_key = client_socket.recv(LEN_UNIT_BUF).split(END_LINE)[0]
            client_private_key = pickle.loads(pickled_client_private_key)

            # -------------- Wait client hash private key ---------------------------------------------------
            # Hashing original client private key
            calculated_hash_client_pickled_private_key = SHA256.new(pickle.dumps(client_private_key)).hexdigest()
            declared_hash_client_pickled_private_key = b64decode(client_socket.recv(LEN_UNIT_BUF).split(END_LINE)[0])

            if calculated_hash_client_pickled_private_key != declared_hash_client_pickled_private_key:
                print "Error : hash and original"
                return

            client_private_key = RSA.importKey(client_private_key)

            '''
            Due to a bug in pyCrypto, it is not possible to decrypt RSA messages
            that are longer than 128 byte. To overcome this problem, the following
            code receives the encrypted data in chunks of 128 byte.
            We need to think how to tell the students about this behavior (another help message?)
            And maybe we should implement this approach in level 3 as well...
            '''
            # -------------------- 3 ------------------------------------------------------------------------
            # -------------- Receive from client in parts message
            # -------------- encrypted by server public key info containing symmetric key and hash symmetric key encrypted by client public key
            pickled_client_key = ''
            pickled_encrypted_client_key = ''

            # Receive from client number of encrypted message parts
            msg_parts = client_socket.recv(LEN_UNIT_BUF).split(END_LINE)[0]
            for i in xrange(int(msg_parts)):
                # Wait from client current part of encrypt client_key
                part_pickled_encrypted_client_key = client_socket.recv(LEN_UNIT_BUF).split(END_LINE)[0]
                pickled_encrypted_client_key += part_pickled_encrypted_client_key

                # Decryption current part of encrypt client_key
                part_encrypt_client_key = pickle.loads(part_pickled_encrypted_client_key)
                part_pickled_client_key = self.crypto.private_key.decrypt(part_encrypt_client_key)
                pickled_client_key += part_pickled_client_key

            items = pickled_client_key.split('#')
            client_sym_key_original = b64decode(items[0])
            print 'Client Sym Key Original : ' + client_sym_key_original
            print len(client_sym_key_original)

            # -------- Extract Client Hash Sym Key
            client_encrypted_hash_sym_key = b64decode(items[1])
            client_encrypted_hash_sym_key = pickle.loads(client_encrypted_hash_sym_key)
            splitted_client_encrypted_hash_sym_key = [client_encrypted_hash_sym_key[i:i+MAX_ENCRYPTED_MSG_SIZE]
                                                      for i in xrange(0, len(client_encrypted_hash_sym_key), MAX_ENCRYPTED_MSG_SIZE)]
            msg_parts = len(splitted_client_encrypted_hash_sym_key)

            client_hash_sym_key = ''
            for i in xrange(int(msg_parts)):
                # Decryption current part of encrypt client_key
                part_client_encrypted_hash_sym_key = client_private_key.decrypt(splitted_client_encrypted_hash_sym_key[i])
                client_hash_sym_key += part_client_encrypted_hash_sym_key

            print 'Client Hash Sym Key : ' + client_hash_sym_key
            calculated_client_sym_key_original = SHA256.new(client_sym_key_original).hexdigest()
            if calculated_client_sym_key_original != client_hash_sym_key:
                print "Error : hash and original"
                return

            return client_sym_key_original

    def sendTclient(self, csocket, data):
        encrypted_data = self.client_keys[csocket][0].encrypt_data(data)  # [0] holds the AES object (see SessionWithClient)
        csocket.send(encrypted_data)

    def recvFclient(self, csocket):
        encrypted_data = csocket.recv(LEN_UNIT_BUF)
        data = self.client_keys[csocket][0].decrypt_data(encrypted_data)  # [0] holds the AES object
        return data

    def SessionWithClient(self, csocket, Ip):
        Accv = self.key_exchange(csocket)
        AccvO = Aes.AESK(Accv)  # original wrote Security.Aes.AESK, but only `import Aes` exists above
        # store [AES object, client IP]; the original appended to and then
        # overwrote this entry inconsistently, breaking the lookups in
        # Continues()/work(), which expect the IP at index 1
        self.client_keys[csocket] = [AccvO, Ip]
        UUID = self.recvFclient(csocket)
        user_name = self.recvFclient(csocket)
        os_version = self.recvFclient(csocket)
        processor = self.recvFclient(csocket)
        cpus_num = self.recvFclient(csocket)
        RAM_size = self.recvFclient(csocket)
        disk_C_size = self.recvFclient(csocket)
        self.writeTGui("New client arrives")
        self.writeTGui(Ip)
        self.writeTGui(Ip + ":UUID:" + UUID)
        self.writeTGui(Ip + ":user name:" + user_name)
        self.writeTGui(Ip + ":processor:" + processor)
        self.writeTGui(Ip + ":cpus num:" + cpus_num)
        self.writeTGui(Ip + ":RAM size:" + RAM_size)
        self.writeTGui(Ip + ":disk C size:" + disk_C_size)

    def start(self):
        global IF_CLIENT_NOT_CONNECTED  # the original assigned the module flag without declaring it global
        self.socket.bind(('0.0.0.0', PORT))
        self.socket.listen(5)
        while True:
            client_socket, client_address = self.socket.accept()
            s = threading.Thread(target=self.SessionWithClient, args=(client_socket, client_address[0],))
            s.start()
            if(IF_CLIENT_NOT_CONNECTED == True):
                IF_CLIENT_NOT_CONNECTED = False
                c = threading.Thread(target=self.Continues)  # pass the method itself; the original called it
                c.start()

    def Continues(self):
        while True:
            sockbool = True
            commend = self.readFGui()
            print commend
            Ipindex = commend.find(":")
            Ip = commend[:Ipindex]
            command = commend[Ipindex:]
            csocket = None
            for s in self.client_keys.keys():
                if self.client_keys[s][1] == Ip:
                    csocket = s
            if csocket == None:
                sockbool = False
            if sockbool == True:
                self.work(csocket, command)
            # commend will be sent to a class that will translate it to a command for the client

    def work(self, csocket, command):
        Ip = self.client_keys[csocket][1]
        print "command received"
        if command == "Total Using":
            self.sendTclient(csocket, command)
            using = self.recvFclient(csocket)
            self.writeTGui(Ip + ":using:" + using)
        if command == "Get Process List":
            self.sendTclient(csocket, command)
            lengf = self.recvFclient(csocket)  # the original forgot the socket argument
            x = int(lengf)
            for i in range(x):
                PId = self.recvFclient(csocket)
                Namee = self.recvFclient(csocket)
                Usingg = self.recvFclient(csocket)
                self.dbcursor.execute("""INSERT INTO Table1(PID \
                ,Pname,Using)\
                VALUES ('%s', '%s', '%s')""" % (PId, Namee, Usingg))
                self.dbcursor.commit()


def task(ser, st, csocket, ip):
    print "received command"
    if st is not None:
        print "Refresh processes"
        ser.sendTclient(csocket, "Refresh processes list")
        lenp = ser.recvFclient(csocket)
        print lenp
        ser.writeTGui(lenp)
        for i in range(int(lenp)):  # recvFclient returns a string; the original passed it straight to range()
            for l in range(3):
                n = ser.recvFclient(csocket)
                print n
                ser.writeTGui(n)
    ser.writeTGui(ser.recvFclient(csocket))
    ser.Continues()  # Continues() takes no extra arguments; the original passed (csocket, ip)


def main(argv):
    raw_input('2')
    time.sleep(1)
    ser = server(argv)
    raw_input("success initializing")
    ser.start()


if __name__ == "__main__":
    raw_input('1')
    main(sys.argv[1])
[ "hadarrabin1@gmail.com" ]
hadarrabin1@gmail.com
e2bd4e7ede37958dddd801bd523cf0e0eb2a8e41
58fdfbf96e679fe2a4d0a44528b718439d2ad9b8
/Code/example.py
32fc89bbdf938ecbb5be6ffa4c3aa2ec94e277cf
[]
no_license
HifzaZaheer96/Assessment
e687505105ed8637f68d6928af837f47948f1675
81537c6d5186aec9143bb8b9bf7d160440a3d0cf
refs/heads/master
2021-02-05T11:48:23.083774
2020-02-28T14:49:17
2020-02-28T14:49:17
243,776,746
0
0
null
null
null
null
UTF-8
Python
false
false
228
py
#!/usr/bin/env python3

def endsPy(text):
    """Return True if text ends with "py", ignoring case."""
    # The original looped over every index yet returned on the first
    # iteration, and mixed `or`/`and` without parentheses, so operator
    # precedence made the test wrong; only the last two characters matter.
    return len(text) >= 2 and text[-2:].lower() == "py"
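# Editor's addition: illustrative checks; expected results assume the
# case-insensitive "ends with py" reading of the original condition.
if __name__ == "__main__":
    assert endsPy("happy")
    assert endsPy("hapPY")
    assert not endsPy("python")
    assert not endsPy("p")
    print("all checks passed")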
[ "Hifza.Zaheer@academytrainee.com" ]
Hifza.Zaheer@academytrainee.com
31c00dc69228314be9ba1332f3507e82b6d983be
3211dbbffe77b4c5b768387377e0c9c71c73ecc9
/crud_project/urls.py
7edc15604bae787d97bb0f41a8d4e5e38f70c004
[]
no_license
jonndoe/blog_project
231de09c13eaf0b1c537f713853786bf28a6f2b1
a867c4faba5e6ae7039cb6b2cbd9a3fc45e4ef73
refs/heads/master
2020-09-16T10:35:54.753112
2019-11-27T01:27:56
2019-11-27T01:27:56
223,743,912
0
0
null
null
null
null
UTF-8
Python
false
false
1,246
py
"""crud_project URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include urlpatterns = [ # django admin path('admin/', admin.site.urls), # user management path('accounts/', include('allauth.urls')), # local apps path('', include('pages.urls')), path('crudobjects/', include('crudobjects.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) if settings.DEBUG: import debug_toolbar urlpatterns = [ path('__debug__/', include(debug_toolbar.urls)), ] + urlpatterns
[ "user@mail.com" ]
user@mail.com
c2638624790408ee92cbef57b6e031ff2ab99905
23a56e0555d6b27aa444d8396ec32f9d2b678a39
/06_volmod_hd/voxelspace_02/voxelspace_02.pyde
b28db102c94e7cb852037851d713d890168ff440
[]
no_license
dtbinh/T1_python-exercises
2ce1b01bc71f8032bbe8fb4ef8f71b648dcde1c5
f4710c3dc2ba8ddb3e3e9069ab8d65df674463ab
refs/heads/master
2020-04-04T20:00:52.191601
2018-01-09T08:14:36
2018-01-09T08:14:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,544
pyde
add_library('peasycam')
add_library('hd')

def setup():
    size(1200, 675, P3D)
    cam = PeasyCam(this, 500)
    noStroke()

    global dx, dy, dz
    dx = 500.0
    dy = 400.0
    dz = 300.0
    b = Box(-dx/2, -dy/2, -dz/2, dx/2, dy/2, dz/2)

    vs = VoxelSpace()
    vs.construct(b, 3.0)
    vs.setValueToAll(10000)
    vd = VoxelDistance(vs)

    vb = VBox(-200, -150, -100, 200, 150, 100)
    vd.addVol(vb, b)
    t = Torus(-200, -150, 0, 150, 40)
    vd.subVol(t, b)
    c = Cylinder(-100, 250, 0, -120, 0, 0, 30)
    vd.subVol(c, b)
    bx = VBox(80, 100, -50, 180, 200, 50, 20)
    vd.subVol(bx, b)

    vs.makeShell(10, 1)

    for i in range(60):
        x = random(-160, 160)
        y = random(-120, 120)
        v = PVector.random2D()
        v.mult(random(0, 20))
        xt = x + v.x
        yt = y + v.y
        cl = Cylinder(x, y, -90, xt, yt, 90, 5)
        vd.addSmooth(cl, b, 30)

    vd.subVol(t, b)
    vd.subVol(c, b)
    vd.subVol(bx, b)
    vd.intVol(vb, b)

    cut = VBox(-dx/2, -dy/2, 20, dx/2, dy/2, dz/2)
    vd.subVol(cut, b)

    global shp
    shp = vs.getPShape(this, 0.0)

def draw():
    background(77)
    directionalLight(255, 127,   0,  1,  0,  0)
    directionalLight(  0, 255, 127,  0,  1,  0)
    directionalLight(127,   0, 255,  0,  0,  1)
    directionalLight(255,   0, 127, -1,  0,  0)
    directionalLight(127, 255,   0,  0, -1,  0)
    directionalLight(  0, 127, 255,  0,  0, -1)
    shape(shp)
    global dx, dy, dz
    noFill()
    stroke(255)
    box(dx, dy, dz)
    box(5)
[ "bernhard@arch.ethz.ch" ]
bernhard@arch.ethz.ch
c6d7fb1b84cec4c11fafe72c947287afa3dc1890
eb1cc6c21c664568ecde2ecbf2af6082da3d3646
/modules/say.py
d0c6df8274e22ecf46f78b9890b8875204dfedfb
[ "BSD-3-Clause" ]
permissive
sammdot/circa
5658d6d460810a25ef41880198103e122927641d
443db2e7a91ed01756d3f1f3e404a5bdccd0ddf4
refs/heads/master
2021-01-18T21:20:19.525465
2015-11-29T18:12:32
2015-11-29T18:12:32
21,531,956
1
0
null
null
null
null
UTF-8
Python
false
false
389
py
from util.esc import unescape

class SayModule:
    require = "cmd"

    def __init__(self, circa):
        self.circa = circa
        self.events = {
            "cmd.say": [self.say]
        }
        self.docs = {
            "say": "say [msg] → print the message to the current channel. Admins only."
        }

    def say(self, fr, to, msg, m):
        if self.circa.is_admin(m.prefix):
            self.circa.say(to, unescape(msg))

module = SayModule
[ "sammii.fernandez@gmail.com" ]
sammii.fernandez@gmail.com
4bd371cf1e1980349140f12dff91949aeb2f7bc5
b416e74ea646f04b5c2ad8d21ac7b1d9c997fd20
/test/tests/configs.py
5168787a2ef7c3c678ebf623e7c453ae7ee9ff4d
[ "MIT" ]
permissive
tuanngo/openclinica_sqldatamart
03b2bf7fba42fbb573f9dd81c95911cf5ef4fcba
ecdda4208021ff5a1cb7a20036eed23bd4196af0
refs/heads/master
2020-04-17T13:51:41.369937
2016-12-07T07:41:17
2016-12-07T07:41:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
119
py
db_connection = {
    "database": "openclinica_fdw_db",
    "port": "5446",
    "user": "postgres",
    "password": "password"}
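# Editor's sketch (not in the original repo): one plausible way a test might
# consume this dict -- building a libpq-style DSN string. The "host" default
# is an assumption; the original file does not name one.
def make_dsn(cfg, host="localhost"):
    return "host={host} port={port} dbname={database} user={user} password={password}".format(
        host=host, **cfg)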
[ "lindsay.stevens.au@gmail.com" ]
lindsay.stevens.au@gmail.com
4404717afb3d521661f7f093ff8c427fd868d769
0dfb55ef7e9b71c368cd792305b65406555a4852
/virtualclassroom/urls.py
b1e14c0a84ef94f5a88641ae3bc14de755701b7a
[ "MIT" ]
permissive
RavicharanN/VirtualClassroom
a6a208ca85bb3c539238fd038b354bb1fe5592fa
e5200e269e9e592db889a5db5ff7905b40f6c822
refs/heads/master
2022-12-12T23:06:07.556427
2018-04-21T11:17:17
2018-04-21T11:17:17
127,512,142
0
1
MIT
2022-12-08T00:58:28
2018-03-31T08:04:20
HTML
UTF-8
Python
false
false
847
py
"""virtualclassroom URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin from django.conf.urls import include urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'', include('virtuo.urls')) ]
[ "ravicharan.vsp@gmail.com" ]
ravicharan.vsp@gmail.com
77661c14f71d8a37b13661709b516a772ebaea5f
4178f2916d2da72cbb45454fbed941dcfe8f6460
/Unittest/Test_unittest.py
b8a6df1b3b9eee35df44a820028e5c1f393c0ae7
[]
no_license
maxcrup007/Selenium_Webdriver_Python
15196cb04ba5cafdc5b776c26d167f0b48fb0e14
6be7f0b9f53df1ba592957029e8a4d22e409d1c4
refs/heads/main
2023-03-24T21:04:31.976451
2021-03-22T09:16:04
2021-03-22T09:16:04
349,379,454
0
0
null
null
null
null
UTF-8
Python
false
false
379
py
import unittest  # missing in the original even though unittest.TestCase is used below

from selenium import webdriver

driver = webdriver.Chrome(executable_path="C:/Users/voraw/Downloads/Compressed/webdriver/chromedriver/chromedriver")


class LoginTest(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome("C:/Users/voraw/Downloads/Compressed/webdriver/chromedriver/chromedriver")
        self.driver.get("https://www.python.org/")
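    # --- Editor's sketch below: the original defines no test methods or
    # tearDown, so the suite would run zero tests. A minimal, assumed
    # completion (the title assertion is the editor's, not the author's):
    def test_title_contains_python(self):
        self.assertIn("Python", self.driver.title)

    def tearDown(self):
        self.driver.quit()


if __name__ == "__main__":
    unittest.main()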
[ "36732487+maxcrup007@users.noreply.github.com" ]
36732487+maxcrup007@users.noreply.github.com
55886d70e37520349e2ebf8fd1dc7f7867fb368b
9e6a965efd5a873adf642998707e383b6b596020
/algorithms/basicpython/reverse_string.py
a03bb5e5ac18648edf31748bcb842654e97e22a4
[]
no_license
anilqad/training
9069603ad0cb92e7157c138205d4075d78720b63
664fd1bf409e1d5067428ac78ef0db654df15252
refs/heads/master
2022-12-24T11:46:21.298779
2020-10-01T13:22:35
2020-10-01T13:22:35
299,314,913
0
1
null
2020-10-01T13:22:37
2020-09-28T13:18:21
Python
UTF-8
Python
false
false
84
py
s = str(input("Enter The string"))
# The original accumulated into a variable named `str`, shadowing the
# built-in used on the line above; renamed to avoid that.
reversed_s = ""
for i in s:
    reversed_s = i + reversed_s
print(reversed_s)
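# Editor's addition: slicing with step -1 reverses a string in one step.
assert reversed_s == s[::-1]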
[ "anilqad@gmail.com" ]
anilqad@gmail.com
c066ce8b84e9b69ee11b163314b32d9b81349686
98c6ea9c884152e8340605a706efefbea6170be5
/examples/data/Assignment_7/hllbra005/question1.py
376cf835a2a072f9e54042391b7879a687350b82
[]
no_license
MrHamdulay/csc3-capstone
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
refs/heads/master
2021-03-12T21:55:57.781339
2014-09-22T02:22:22
2014-09-22T02:22:22
22,372,174
0
0
null
null
null
null
UTF-8
Python
false
false
339
py
# get a unique list of words
# HLLBRA005
#

words = []
word = input("Enter strings (end with DONE):\n")
while word != "DONE":
    if word not in words:
        words.append(word)
    word = input("")

# print out the unique list
print("")
print("Unique list:")
for i in range(len(words)):
    print(words[i])
[ "jarr2000@gmail.com" ]
jarr2000@gmail.com
d922bc4def24bc471ce38fbb57e931510de709b8
e5606385fc77c1fbb30be223637b2b62372d6635
/tango_with_django/urls.py
bd0f1ecdf6812824d29366876704172b826d4a2a
[]
no_license
bhoj001/DJ_Test_App
6b2f116523af72a15370e303af745d7df7d7951c
a20d5ae9049a5d432ba219eb96036953c5512bf5
refs/heads/master
2020-03-24T00:25:57.714156
2018-07-25T11:29:15
2018-07-25T11:29:15
142,290,934
0
0
null
null
null
null
UTF-8
Python
false
false
186
py
from django.conf.urls import url
from tango_with_django import views

urlpatterns = [
    # the original listed url(r'^', ...) first, which matches every request
    # and made the pattern below unreachable; anchoring it with $ fixes that
    url(r'^$', views.index, name='index'),
    url(r'^index$', views.index, name='index'),
]
[ "nishantkarki305@gmail.com" ]
nishantkarki305@gmail.com
0a663d86d8ff45aa87d8f3e5065ab412e2af0191
132117ecd83e34147263e6fc00024f4b6a9bd018
/Python编程从入门到实践/第七章学习/代码实现/counting.py
8d2010c0881f0f9465e45db58606440d9e482587
[]
no_license
ZYC0515/LearnPython
eda615607bc90c08bd47c11f8e7be11c2e13a046
d4edc742070bda03d8d9017315805562274e2bed
refs/heads/master
2020-07-02T19:05:22.103153
2019-09-14T09:51:05
2019-09-14T09:51:05
201,632,934
0
0
null
null
null
null
UTF-8
Python
false
false
303
py
# counting.py
current_number = 1
while current_number <= 5:
    print(current_number)
    current_number += 1

current_number = 0
while current_number < 10:
    current_number += 1
    if current_number % 2 == 0:
        continue
    print(current_number)

x = 1
while x <= 5:
    print(x)
    x += 1
[ "925129271@qq.com" ]
925129271@qq.com
59765fbeec8c7868cab3faf3851a468018eeef01
16dfaa65444248ad39b0c60cfb54f14f0d4aaa7f
/test/test_api_response_single_webhooks.py
4e395132ce1e8bfa0c3c66d1810db2cdaf42de6d
[]
no_license
mimepost/mimepost-python
3af0dfaa5dfd5b07154f3c73107bf63044198413
30f81d8c090b2dc75121a572040562c2a950c7e7
refs/heads/master
2023-02-27T10:51:01.903032
2021-02-06T15:11:11
2021-02-06T15:11:11
335,716,791
0
0
null
null
null
null
UTF-8
Python
false
false
1,158
py
# coding: utf-8

"""
    MimePost API Reference (Beta)

    MimePost API for sending email. You can find out more about MimePost at [https://mimepost.com](http://mimepost.com). For this sample, you can use the api key `special-key` to test the authorization filters.  # noqa: E501

    OpenAPI spec version: 0.1.0
    Contact: support@mimepost.com
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import MimePost
from MimePost.models.api_response_single_webhooks import ApiResponseSingleWebhooks  # noqa: E501
from MimePost.rest import ApiException


class TestApiResponseSingleWebhooks(unittest.TestCase):
    """ApiResponseSingleWebhooks unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testApiResponseSingleWebhooks(self):
        """Test ApiResponseSingleWebhooks"""
        # FIXME: construct object with mandatory attributes with example values
        # model = MimePost.models.api_response_single_webhooks.ApiResponseSingleWebhooks()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
[ "tabby" ]
tabby
b1d5a07134bc21ee7bca96e110fbe440beb81f06
43f9c92b25463f443062c0f774c95f8dbe85eadd
/Lab 2/Kode/3a.py
c3c0ca8254419da97eac366d8becc5270cbddba2
[]
no_license
Jonas-Asp/FYS-KJM4710
ae7cbe9b7759ca5701c1ac7b9014962008486ea7
0c8fd31edbe5155e7e3c9f5edd8131e9be7843f8
refs/heads/master
2020-03-27T13:25:19.623133
2018-12-07T16:42:43
2018-12-07T16:42:43
146,608,410
0
0
null
null
null
null
UTF-8
Python
false
false
2,440
py
import numpy as np
import matplotlib.pyplot as plt

""" 1 MeV """
D = [3.4382E-12, 5.0641E-12, 5.0086E-12, 5.0943E-12, 4.8840E-12,
     4.8216E-12, 4.8968E-12, 4.9208E-12, 4.7834E-12, 4.7082E-12,
     4.7187E-12, 4.6726E-12, 4.6250E-12, 4.6771E-12, 4.5368E-12,
     4.4455E-12, 4.3973E-12, 4.3873E-12, 4.4088E-12, 4.2165E-12,
     4.2721E-12, 4.0199E-12, 3.9712E-12, 4.0058E-12, 3.9884E-12,
     3.8506E-12, 3.5808E-12, 3.4054E-12, 3.2251E-12, 2.9922E-12,
     2.8416E-12, 2.6237E-12, 2.4158E-12, 2.2822E-12, 2.1366E-12,
     1.9893E-12, 1.8696E-12, 1.7159E-12, 1.6146E-12, 1.5276E-12,
     1.4456E-12, 1.3219E-12, 1.2108E-12, 1.1380E-12, 1.0858E-12,
     9.8552E-13, 9.3114E-13, 8.9027E-13, 8.1526E-13, 7.6493E-13]
D = (1/np.max(D)) * np.array(D)
x = np.linspace(0.25, 31.5, 50)
plt.xlabel('Longitudinal depth [cm]')
plt.ylabel('Relative dose')
plt.plot(x, D, 'o-')
plt.show()

""" Side """
D = [4.9208E-12, 4.8336E-12, 4.8231E-12, 4.4662E-12, 3.1447E-13,
     9.3888E-14, 6.0933E-14, 3.7020E-14, 2.8458E-14, 1.9280E-14,
     1.5614E-14, 1.3524E-14, 6.8920E-15]
D = (1/np.max(D)) * np.array(D)
x = [1, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10, 15]
plt.xlabel('Lateral length [cm]')
plt.ylabel('Relative dose')
plt.plot(x, D, 'o-')
plt.show()

""" 10 MeV """
D = [1.2260E-12, 3.4278E-12, 5.6357E-12, 7.8461E-12, 1.0021E-11,
     1.1944E-11, 1.3836E-11, 1.5644E-11, 1.7321E-11, 1.8687E-11,
     1.9977E-11, 2.1219E-11, 2.2261E-11, 2.2819E-11, 2.3427E-11,
     2.3511E-11, 2.3953E-11, 2.3999E-11, 2.4013E-11, 2.3808E-11,
     2.3828E-11, 2.3836E-11, 2.3550E-11, 2.3431E-11, 2.3153E-11,
     2.3042E-11, 2.2866E-11, 2.2275E-11, 2.1948E-11, 2.1699E-11,
     2.1112E-11, 2.0844E-11, 2.0560E-11, 2.0252E-11, 1.9819E-11,
     1.9286E-11, 1.8818E-11, 1.8295E-11, 1.8208E-11, 1.7841E-11,
     1.7519E-11, 1.7068E-11, 1.6629E-11, 1.6228E-11, 1.5749E-11,
     1.5481E-11, 1.5151E-11, 1.4777E-11, 1.4623E-11, 1.3952E-11]
D = (1/np.max(D)) * np.array(D)
x = np.linspace(0.25, 31.5, 50)
plt.xlabel('Longitudinal depth [cm]')
plt.ylabel('Relative dose')
plt.plot(x, D, 'o-')
plt.show()

""" Side """
D = [1.5644E-11, 1.5265E-11, 1.4606E-11, 1.2142E-11, 2.5741E-12,
     5.2706E-13, 1.1703E-13, 3.5597E-14, 2.0339E-14, 1.5250E-14,
     1.2572E-14, 9.8411E-15, 5.7256E-15]
D = (1/np.max(D)) * np.array(D)
x = [1, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10, 15]
plt.xlabel('Lateral length [cm]')
plt.ylabel('Relative dose')
plt.plot(x, D, 'o-')
plt.show()
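# Editor's sketch (not in the original file): the normalise-and-plot steps
# above repeat four times; an equivalent helper would be
def plot_relative_dose(x, dose, xlabel):
    dose = np.array(dose) / np.max(dose)
    plt.xlabel(xlabel)
    plt.ylabel('Relative dose')
    plt.plot(x, dose, 'o-')
    plt.show()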
[ "42804793+Jonas-Asp@users.noreply.github.com" ]
42804793+Jonas-Asp@users.noreply.github.com
5df3eaf861df0f741a56f3b6dc909dc5acd0a2ef
a1115883ce50d35482fb5e6662ae246b7782a7aa
/migrations/versions/2018_01_06_7525fd3b67d5_.py
d4208fe23b48a12f37a18ebc9f749861a25bd764
[ "MIT" ]
permissive
viaict/viaduct
4c1252706a23f9ea83e6507ebafef2ced2288746
1faec7e123c3fae7e8dbe1a354ad27b68f2a8cef
refs/heads/develop
2021-01-25T10:17:12.802733
2018-10-13T12:53:47
2018-10-13T12:53:47
18,373,059
11
3
MIT
2020-05-20T16:44:52
2014-04-02T16:26:03
CSS
UTF-8
Python
false
false
351
py
"""empty message. Revision ID: 7525fd3b67d5 Revises: ('2de19b331017', '009352d11348') Create Date: 2018-01-06 17:16:54.405469 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '7525fd3b67d5' down_revision = ('2de19b331017', '825dad56a2c3') def upgrade(): pass def downgrade(): pass
[ "maico.timmerman@gmail.com" ]
maico.timmerman@gmail.com
eeb887882b21b15eaf1a2b617e305409283b808e
11d0cd1983cf1c13e2929548eb946336197b6c51
/fragQueue.py
58a448b659c56a921fa88bfb01a68c9dedba9687
[]
no_license
alkerway/copylivemanifest
1e057a0f639281ad0e49af864660c6e2002b87a2
8158768faa9498cf4e0f5fddb6b28cc0ec64a6d8
refs/heads/master
2023-05-27T03:25:28.422652
2021-05-20T04:32:21
2021-05-20T04:32:21
363,857,558
0
0
null
null
null
null
UTF-8
Python
false
false
4,868
py
import threading
import asyncio
import httpx

from downloader import Downloader
from log import log

downloader = Downloader()

MAX_STALL_COUNT = 20
MAX_FRAG_ERROR = 10


class FragQueue:
    def __init__(self, outDir, finishCallback, cancelTimer):
        self.lastQueueIdx = -1
        self.lastDownloadedIdx = -1
        self.frags = []
        self.outDir = outDir
        self.finishCallback = finishCallback
        self.cancelTimer = cancelTimer
        self.wrapUpAndFinish = False
        self.idxOffset = 0
        self.manifestLength = 0
        self.referer = ''
        self.stallCount = 0
        self.fragErrorCount = 0
        # clear level file
        open(self.outDir + '/level.m3u8', 'w').close()

    def add(self, fragArr):
        newFrags = []
        lastIdx = self.lastQueueIdx
        for frag in fragArr:
            if frag['idx'] > lastIdx:
                newFrags.append(frag)
                log(f'Frag {frag["idx"] - self.idxOffset} added to queue, {threading.active_count()} threads active')
        self.frags += newFrags
        if len(self.frags):
            self.lastQueueIdx = self.frags[-1]['idx']
        if not len(newFrags):
            log('Level stall increment')
            self.stallCount += 1
            if self.stallCount > MAX_STALL_COUNT:
                log('Level stall exceeded max stall, stopping')
                self.wrapUpAndFinish = True
                self.onQueueError()
                return
        else:
            self.stallCount = 0
        asyncio.run(self.handleNewFrags(newFrags))

    async def handleNewFrags(self, newFrags):
        results = None
        async with httpx.AsyncClient() as session:
            fragDownloads = []
            for frag in newFrags:
                task = asyncio.ensure_future(downloader.downloadFrag(session,
                                                                     frag['remoteUrl'],
                                                                     f'{self.outDir}/{frag["storagePath"]}',
                                                                     self.referer,
                                                                     frag['fragLen']))
                fragDownloads.append(task)
            results = await asyncio.gather(*fragDownloads, return_exceptions=True)
        for idx, frag in enumerate(newFrags):
            if results[idx]:
                log(f'Frag {frag["idx"] - self.idxOffset} downloaded')
                frag['downloaded'] = True
                self.onDownloaded(frag)
                self.fragErrorCount = 0
            else:
                log(f'Error downloading frag {frag["idx"] - self.idxOffset}')
                self.frags.remove(frag)
                self.fragErrorCount += 1
                if self.fragErrorCount >= MAX_FRAG_ERROR:
                    log('Frag error exceeded max error count, stopping')
                    self.wrapUpAndFinish = True
                    self.onQueueError()
        self.onFragFinish()

    def peek(self):
        if len(self.frags):
            return self.frags[0]
        else:
            return None

    def onDownloaded(self, frag):
        if self.frags[0]['downloaded'] and self.lastDownloadedIdx != frag['idx']:
            curFrag = self.peek()
            if frag['idx'] != self.lastDownloadedIdx + 1 and self.lastDownloadedIdx > -1:
                self.frags[0]['tagLines'].insert(0, '#EXT-X-DISCONTINUITY')
                log(f'!!! Missing frags {self.lastDownloadedIdx + 1 - self.idxOffset} to {curFrag["idx"] - self.idxOffset - 1}')
            newManifestLines = []
            while curFrag and curFrag['downloaded'] == True:
                newManifestLines = newManifestLines + curFrag['tagLines']
                newManifestLines.append(curFrag["storagePath"])
                self.lastDownloadedIdx = curFrag['idx']
                fragLen = curFrag['fragLen']
                self.manifestLength += fragLen
                log(f'Frag {self.lastDownloadedIdx - self.idxOffset} writing to manifest ({round(self.manifestLength / 60, 1)} min)')
                self.frags.pop(0)
                curFrag = self.peek()
            self.addLinesToLevel(newManifestLines)
        else:
            pass
        self.onFragFinish()

    def onFragFinish(self):
        if self.wrapUpAndFinish and len(self.frags) == 0:
            self.wrapUpLevel()

    def addLinesToLevel(self, newManifestLines):
        with open(self.outDir + '/level.m3u8', 'a') as levelFile:
            levelFile.write('\n'.join(newManifestLines))
            levelFile.write('\n')

    def wrapUpLevel(self):
        log('Last frag downloaded, finishing up')
        self.addLinesToLevel(['#EXT-X-ENDLIST', ''])
        self.finishCallback()

    def onQueueError(self):
        self.cancelTimer()

    def finishAndStop(self):
        self.wrapUpAndFinish = True
        log('finishAndStop', len(self.frags))
        if len(self.frags) == 0:
            self.wrapUpLevel()

    def setIdxOffset(self, offset):
        self.idxOffset = offset

    def setReferer(self, referer):
        self.referer = referer
[ "alex@walkeralex.com" ]
alex@walkeralex.com
aa4b5f9d96c012e427872cb8340f81f84ee3dbfd
b9220b5187651761693170005bf9a0348125bb43
/PythonScripts/newpasswd.py
daf60e8a455fc9964e7b5858a8e3fb5cba850f9d
[]
no_license
brainwerkz/SamplePyScripts
aa5ad53917e72accf7b2cfa2609ba769e80b30f7
a8c33d3a97e2c23108a399da09a7955fd34b3435
refs/heads/master
2021-01-21T17:03:23.653747
2017-07-23T21:47:25
2017-07-23T21:47:25
91,929,257
0
0
null
null
null
null
UTF-8
Python
false
false
318
py
from random import choice
import string

def GenPasswd():
    # string.letters existed only in Python 2; ascii_letters works in 2 and 3
    chars = string.ascii_letters + string.digits
    newpasswd = ""  # was never initialised, so the concatenation below raised NameError
    for i in range(8):
        newpasswd = newpasswd + choice(chars)
    return newpasswd

# def GenPasswd2(length=8, chars=string.ascii_letters + string.digits):
#     return ''.join([choice(chars) for i in range(length)])
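if __name__ == "__main__":
    # Editor's addition: quick demonstration, plus the CSPRNG-backed variant.
    # For real credentials the stdlib `secrets` module is the safer choice.
    print(GenPasswd())
    import secrets
    print(''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(8)))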
[ "jrichardson@brainwerkz.com" ]
jrichardson@brainwerkz.com
23c3859286f2456ba1b7ddcb56500fe2ce1ea9d3
e2a8a70250671b12735b87e1034a24af6f28ef18
/pytorch/train_res50_8tops.py
78fe8dc2092d50f82cebd4e601195b7e75a7f571
[]
no_license
Alibhji/FrontView_to_BirdeyeView
a06ed4e9b32d4f098f87e1bc4c7774615a38b2af
df00e4b83851440422f22815f6b0bad6cc2af584
refs/heads/master
2022-04-07T19:08:25.137506
2020-03-18T07:39:20
2020-03-18T07:39:20
246,663,819
0
0
null
null
null
null
UTF-8
Python
false
false
5,575
py
# this code is written by Ali Babolhaveji
# convert front view to birdeye view
# Pytorch + GPU
# 3/11/2020

# command :
# python -W ignore train_res50_8tops.py

from lib import Bkend_res50_8top
from lib import Dataset_top_to_birdView
from lib import train_, validation_

import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import copy
import os
import shutil
import pickle
import pandas as pd
import gc

from torch.utils.tensorboard import SummaryWriter
SummaryWriter._PRINT_DEPRECATION_WARNINGS = False
import torchvision

history = pd.DataFrame()

start_epoch = 360
end___epoch = 385
train_batch = 512
val_batch = 512
num_workers = 40
learning_rate = 0.2

experiment_name = 'experiment_14' + f'_epoch_{start_epoch}_to_{end___epoch}_batch_{train_batch}_lr_{learning_rate}'

load_model_ = True
loaded_weights = './runs/experiment_13_epoch_350_to_360_batch_512_lr_0.2/saved_models/model_E352_Loss0.002353.pt'

resualt_save_dir = os.path.join('runs', experiment_name)
del_dir = input(f"Do you want to delete [{resualt_save_dir}] directory? (y/n) ")
if(del_dir == 'y'):
    if(os.path.exists(resualt_save_dir)):
        shutil.rmtree(resualt_save_dir)
else:
    assert del_dir == 'y', 'the program is stopped.'

model_save_dir = os.path.join(resualt_save_dir, 'saved_models')
os.makedirs(model_save_dir)

# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="0, 1,2,3"

best_loss = 1000000000

# Create Model
model = Bkend_res50_8top()
print(f"[info] Model is created.")

if load_model_:
    state_dict = torch.load(loaded_weights)
    model.load_state_dict(state_dict)
    print(f"[info] Model is loaded. [from {loaded_weights}]")

# define loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)

# t_in = torch.randn(1,3,224,224 )
# t_in2 = torch.randn(1,4)
# model(t_in,t_in2).shape

# Create Datasets
dataset_root = './data'
root_train = os.path.join(dataset_root, 'train')
root_val = os.path.join(dataset_root, 'val')
root_test = os.path.join(dataset_root, 'test')

train_list = [os.path.join(root_train, itm) for itm in os.listdir(root_train) if os.path.isdir(os.path.join(root_train, itm))]
val_list = [os.path.join(root_val, itm) for itm in os.listdir(root_val) if os.path.isdir(os.path.join(root_val, itm))]

training_generator = Dataset_top_to_birdView(train_list, type_='train', check_images=True)
validation_generator = Dataset_top_to_birdView(val_list, type_='val', check_images=True)

# create Dataloader
train_loader = DataLoader(training_generator, batch_size=train_batch, num_workers=num_workers, shuffle=True, pin_memory=True)
val_loader = DataLoader(validation_generator, batch_size=val_batch, num_workers=num_workers)

len_t = len(train_loader) * train_batch
len_v = len(val_loader) * val_batch
print(f'[info] Train dataset has:{len_t} images.')
print(f'[info] val dataset has:{len_v} images.')

# create tensorboardX Logger
writer = SummaryWriter(resualt_save_dir)
# https://github.com/lanpa/tensorboardX

# get some random training images, save model architecture and dataset sample
dataiter = iter(train_loader)
label_front, crop_front, label_top, meta_data = dataiter.next()

# create grid of images
img_grid = torchvision.utils.make_grid(crop_front)
writer.add_image('training_set_batches', img_grid)
writer.add_graph(model, (crop_front, label_front))
writer.close()

# Transfer model on the GPU/GPUs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"[info] Device is:{device}")
# torch.cuda.set_device(0)
model = model.to(device)

# if torch.cuda.device_count() > 1:
#     print("Let's use", torch.cuda.device_count(), "GPUs!")
#     model = nn.DataParallel(model, device_ids=[0,1,2,3])

for epoch in range(start_epoch, end___epoch):  # loop over the dataset multiple times
    print(f'======================= Epoch {epoch} / {end___epoch} =======================')
    gc.collect()
    train_(model, train_loader, epoch, device=device, criterion=criterion, optimizer=optimizer, writer=writer)
    curr_val_loss = \
        validation_(model, val_loader, epoch, device=device, criterion=criterion, writer=writer)

    for name, param in model.named_parameters():
        writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch)
    writer.close()

    # print('curr_val_loss' , curr_val_loss)
    if curr_val_loss < best_loss:
        model_save_format = f"model_E{epoch:03d}_Loss{curr_val_loss:.6f}.pt"
        # torch.save(model.state_dict(), os.path.join(experiment_name, f"./model_E{epoch:03d}_Loss{curr_val_loss:.6f}.pt"))
        torch.save(model.state_dict(), os.path.join(model_save_dir, model_save_format))
        # print(f"./model_E{epoch:03d}_Loss{curr_val_loss:.6f}.pt is saved.")
        print(os.path.join(model_save_dir, model_save_format) + " is saved.")
        best_loss = curr_val_loss

    with open(os.path.join(model_save_dir, f'history.pkl'), 'wb') as handle:
        pickle.dump(history, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(model_save_dir, f'history_{start_epoch}_{end___epoch}.pkl'), 'wb') as handle:
        pickle.dump(history, handle, protocol=pickle.HIGHEST_PROTOCOL)

print('Finished Training')
[ "40876922+Alibhji@users.noreply.github.com" ]
40876922+Alibhji@users.noreply.github.com
c8631d3949aca9a9bb305346fed17b3df712cbe4
f5afad14aa82fa8fd7ab229e5fd339e9a8aa44ac
/pm4pymdl/util/parquet_importer/__init__.py
71d97d0577010df6f9b5dd42358941ece872da99
[ "MIT" ]
permissive
Javert899/pm4py-mdl
fd58df96e3853f241ca13d507a60d641e5a4cae5
24ecf77a003bcf3db7a7956459eb686f0497d12a
refs/heads/master
2021-11-12T00:34:28.034948
2021-11-05T11:25:50
2021-11-05T11:25:50
207,297,045
8
10
null
null
null
null
UTF-8
Python
false
false
74
py
from pm4pymdl.util.parquet_importer import versions, importer, parameters
[ "javert@northwood.northwood.net" ]
javert@northwood.northwood.net
82cc819f410713a9d81bc08104e554054fba9a09
f5c3297f207ca19e821722c825825fc9bba76298
/A2/q1.py
2e14e9024b98de9b9b88a79951589cbc2e9dee9a
[]
no_license
mayanksha/CS786
57032ecb01d6aa7d10b56db56d15e6bd5215782d
a0f3ba57bd084230eb16c6be0e37faf3485063e9
refs/heads/master
2020-04-27T13:24:27.602535
2019-03-10T15:02:07
2019-03-10T15:13:01
174,368,761
0
0
null
null
null
null
UTF-8
Python
false
false
4,702
py
#!/usr/bin/env python
import sys
import numpy as np
import numpy.matlib
import cv2
import matplotlib.pyplot as plt
from scipy import ndimage
from sklearn import cluster
from sklearn.metrics import silhouette_samples, silhouette_score


def build_filters():
    filters = []
    ksize = 10
    sigma = 5.0
    for theta in np.arange(0, np.pi, np.pi / 16):
        kern = cv2.getGaborKernel((ksize, ksize), sigma, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
        kern /= 5*kern.sum()
        filters.append(kern)
    return filters


def detect_object(val):
    if val == 1:
        return("Triangle")
    elif val == 2:
        return("Square")
    else:
        return("Some other shape")


def intersection(line1, line2):
    rho1, theta1 = line1[0], line1[1]
    rho2, theta2 = line2[0], line2[1]
    if (abs(theta1 - theta2) < (np.pi/6)):
        return [None, None]
    A = np.array([
        [np.cos(theta1), np.sin(theta1)],
        [np.cos(theta2), np.sin(theta2)]
    ])
    b = np.array([[rho1], [rho2]])
    x0, y0 = np.linalg.solve(A, b)
    x0, y0 = int(np.round(x0)), int(np.round(y0))
    return [x0, y0]


def process(img, filters):
    accum = np.zeros_like(img)
    for kern in filters:
        fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
        np.maximum(accum, fimg, accum)
    return accum


def predict(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    filters = build_filters()
    res1 = process(img, filters)
    gabor_filter_img = res1
    gray = cv2.cvtColor(res1, cv2.COLOR_BGR2GRAY)
    ret, thresh2 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    ret, thresh1 = cv2.threshold(thresh2, 0, 255, cv2.THRESH_BINARY)
    # ret,thresh1 = cv2.threshold(thresh1,127,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # thresh1 = cv2.erode(thresh1, None)

    # cv2.imshow('thresh1', thresh1)
    # if cv2.waitKey(0) & 0xff == 27:
    #     cv2.destroyAllWindows()

    rho, theta, thresh = 7, np.pi/180, 175
    lines = cv2.HoughLines(thresh1, rho, theta, thresh)

    for line in lines:
        for r, theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*r
            y0 = b*r
            x1 = int(x0 + 1000*(-b))
            y1 = int(y0 + 1000*(a))
            x2 = int(x0 - 1000*(-b))
            y2 = int(y0 - 1000*(a))
            cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    X = []
    for i, group in enumerate(lines[:-1]):
        for next_group in lines[i+1:]:
            for line1 in group:
                for line2 in next_group:
                    x = intersection(line1, line2)
                    if x[0] != None:
                        X.append(x)

    for i in X:
        cv2.circle(img, (i[0], i[1]), 2, (255, 0, 0))
    lined_image = img

    sse = {}
    for k in range(2, 7):
        kmeans = cluster.KMeans(n_clusters=k, max_iter=1000).fit(X)
        sse[k] = kmeans.inertia_  # Inertia: Sum of distances of samples to their closest cluster center

    curve = list(sse.values())
    nPoints = len(curve)
    allCoord = np.vstack((range(nPoints), curve)).T
    np.array([range(nPoints), curve])
    firstPoint = allCoord[0]
    lineVec = allCoord[-1] - allCoord[0]
    lineVecNorm = lineVec / np.sqrt(np.sum(lineVec**2))
    vecFromFirst = allCoord - firstPoint
    scalarProduct = np.sum(vecFromFirst * np.matlib.repmat(lineVecNorm, nPoints, 1), axis=1)
    vecFromFirstParallel = np.outer(scalarProduct, lineVecNorm)
    vecToLine = vecFromFirst - vecFromFirstParallel
    distToLine = np.sqrt(np.sum(vecToLine ** 2, axis=1))
    max_index = np.argmax(distToLine)

    return max_index, gabor_filter_img, lined_image, sse


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--shape", help="1 for Square, 0 for triangle", default="0")
    args = parser.parse_args()
    print(args)
    if args.shape == "1":
        img_fn = './square.png'
    else:
        img_fn = './triangle.png'

    img = cv2.imread(img_fn)
    if img is None:
        print('Failed to load image file:', img_fn)  # was a Python 2 print statement
        sys.exit(1)

    max_index, gabor_filter_img, lined_image, sse = predict(img)

    cv2.imshow('Gabor Filtered Image', gabor_filter_img)
    cv2.imwrite('Gabor_square.png', gabor_filter_img)
    if cv2.waitKey(0) & 0xff == 27:
        cv2.destroyAllWindows()

    cv2.imshow('Intersection Points of Gabor Filter Image', lined_image)
    cv2.imwrite('hough_square.png', lined_image)
    if cv2.waitKey(0) & 0xff == 27:
        cv2.destroyAllWindows()

    plt.figure()
    plt.plot(list(sse.keys()), list(sse.values()))
    plt.xlabel("Number of cluster")
    plt.ylabel("SSE")
    plt.show()

    print(detect_object(max_index))
[ "mayank8019@gmail.com" ]
mayank8019@gmail.com
96fed0ccccb1e58f48083f151ce872274917d221
d57040c304cbe77e5a13d3cab98b15ba2f741c41
/PCfix/migrations/0018_order_annuluseropenid.py
ea197558110f9953eaadc475ea3a8cce2e4ae6a2
[]
no_license
XQrobby/fix
1d2ce929c3e0d869bffde2cab94c174bd6eee81c
d9f86069713dce84c562312fb9805048be2af15d
refs/heads/master
2020-04-23T21:39:20.789989
2019-02-19T13:58:50
2019-02-19T13:58:50
171,101,089
0
0
null
null
null
null
UTF-8
Python
false
false
402
py
# Generated by Django 2.1.5 on 2019-01-29 11:59

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('PCfix', '0017_auto_20190129_1155'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='annulUserOpenid',
            field=models.CharField(default='', max_length=50),
        ),
    ]
[ "1260777400@qq.com" ]
1260777400@qq.com
1e21ab84e958a395cdfc22bba305321df5eea74a
cf7b827958166c8569eb58deb511cc3f07567741
/in_Python/1716 Calculate Money in Leetcode Bank.py
cf86e90ac27c7419eb082d5d8c85fe8500ec0088
[]
no_license
YangLiyli131/Leetcode2020
e4e36eb36b1983f73b0e733455b4a7953dfebe6d
20623defecf65cbc35b194d8b60d8b211816ee4f
refs/heads/master
2023-08-22T06:00:55.924112
2021-09-18T19:04:15
2021-09-18T19:04:15
251,426,203
0
0
null
null
null
null
UTF-8
Python
false
false
413
py
class Solution(object):
    def totalMoney(self, n):
        """
        :type n: int
        :rtype: int
        """
        res = 0
        weeks = n // 7  # `/` yields a float on Python 3, which breaks range() below
        remain = n % 7
        for i in range(1, weeks + 1):
            res += (i + i + 6) * 7 // 2
        start = weeks + 1
        while remain != 0:
            res += start
            start += 1
            remain -= 1
        return res
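# Editor's sketch (not part of the original solution): the same total in
# O(1). With w = n // 7 full weeks the weekly sums form an arithmetic
# series 28, 35, ... giving 28*w + 7*w*(w-1)/2; the r = n % 7 leftover
# days start at w+1 and contribute r*(w+1) + r*(r-1)/2.
def total_money_closed_form(n):
    w, r = divmod(n, 7)
    return 28 * w + 7 * w * (w - 1) // 2 + r * (w + 1) + r * (r - 1) // 2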
[ "noreply@github.com" ]
YangLiyli131.noreply@github.com
718cfa6a8af7b62782c68f204154f3455a18b862
193e25de2add1004b9163adc369dd062b359f78e
/labs-main/lab7/Task1/informatics-tasks/3.py
82f3d11818e14ab883512593f5a1b7222e22f0c5
[]
no_license
rakhatyeskenov/Web-development-2021-spring
f56237f4d7307703f0ee4e0687036805bbee1de7
7541f694f3da8639be2e1323073d121caff87040
refs/heads/main
2023-04-02T21:14:58.412948
2021-04-14T21:52:21
2021-04-14T21:52:21
336,210,777
0
0
null
null
null
null
UTF-8
Python
false
false
49
py
n = int(input())
k = int(input())
print(k // n)
[ "noreply@github.com" ]
rakhatyeskenov.noreply@github.com
8d4a6c6c3a16885bf55b23360fa1339bbaa997e2
fefdb381958ecebf4f7b7bafe6b92721035cc9e5
/src/visualizer.py
0952d8ec0125ab45df6ccf111b174b345ea3d0c6
[]
no_license
SoICT-RA-TKV/fso_data_generator
f7b5c8927e4c9aabeecc90eeba01429082b8887b
5b241bdd4edac127f1bee10e9f28f7cdb6ef292f
refs/heads/master
2023-02-27T09:05:51.926091
2021-02-01T18:14:33
2021-02-01T18:14:33
230,081,677
0
0
null
null
null
null
UTF-8
Python
false
false
2,220
py
import json
import sys
from matplotlib import pyplot as plt
import dict_txt
import os

global prefix
global visualize_func


def visualize_density(file):
    if file.split('/')[-1] != 'density.txt':
        return
    stream = open(file, 'r')
    mapName = stream.readline().split(' ')[0].replace('\n', '')
    NMap = int(stream.readline())
    ratio = float(stream.readline())
    stream.readline()
    density = []
    while True:
        tmp = stream.readline().replace('\n', '').split(' ')
        try:
            density.append([float(i) for i in tmp])
        except:
            break
    Nr = len(density)
    Nc = len(density[0])
    plt.imshow(density, interpolation='nearest', cmap='coolwarm', vmin=0, vmax=1)
    plt.savefig(file.replace('txt', 'png'))
    plt.clf()


def visualize_fso(file):
    _map = dict_txt.fso_txt2dict(file)
    r = []
    c = []
    for fso in _map["FSO"]:
        r.append(fso["r"])
        c.append(fso["c"])
    plt.scatter(c, r, marker='+')
    plt.axis('scaled')
    image_file = file.split('.')
    image_file[-1] = 'png'
    image_file = '.'.join(image_file)
    plt.savefig(image_file)
    plt.clf()


def visualize_hap(file):
    _map = dict_txt.hap_txt2dict(file)
    HAPS = _map["HAP"]
    for HAP in HAPS:
        h = HAP["coordinates"]
        r = []
        c = []
        for fso in HAP["FSO"]:
            r.append(fso["r"])
            c.append(fso["c"])
        for i in range(len(c)):
            plt.plot([h["c"], c[i]], [h["r"], r[i]], c='g')
        plt.scatter([h["c"]], [h["r"]], c='r')
        plt.scatter(c, r, c='b', marker='+')
    plt.axis('scaled')
    image_file = file.split('.')
    image_file[-1] = 'png'
    image_file = '.'.join(image_file)
    plt.savefig(image_file)
    plt.clf()


def visualize_batch(file='data'):
    global prefix
    global visualize_func
    if os.path.isfile(file):
        for p in prefix:
            if file.split('/')[-1].find(p) == 0 and file.split('.')[-1] == 'txt':
                print('Visualizing {}'.format(file))
                visualize_func[p](file)
                break
    else:
        for f in os.listdir(file):
            visualize_batch(file=file + '/' + f)


def main():
    global prefix
    global visualize_func
    prefix = ['gfso', 'clustering', 'density']
    visualize_func = {
        'gfso': visualize_fso,
        'clustering': visualize_hap,
        'density': visualize_density
    }
    if len(sys.argv) > 1:
        visualize_batch(file=sys.argv[1])
    else:
        visualize_batch()


if __name__ == "__main__":
    main()
[ "vuong.1998@gmail.com" ]
vuong.1998@gmail.com
25a14a49de07c42b42f9f0090194a0c59b6c028b
795963c77fd920b5ba91e9045777c2137af37285
/Python-jk/419.py
101a8f44576f1f6679406029c0c6db1dbbbe7650
[]
no_license
kedixa/LeetCode
dce74c9395d366db559302b48b0b21cd4f869119
08acab1cebfac2aed4816cf9d8186a22304488a9
refs/heads/master
2020-07-04T05:11:33.019564
2018-03-08T13:30:18
2018-03-08T13:30:18
66,902,234
1
0
null
null
null
null
UTF-8
Python
false
false
905
py
class Solution(object):
    def countBattleships(self, board):
        """
        :type board: List[List[str]]
        :rtype: int
        """
        count = 0
        last = False
        for i, row in enumerate(board):
            last = False
            for j, c in enumerate(row):
                if c == 'X':
                    if last == False:
                        count = count + 1
                    last = True
                else:
                    last = False
        tboard = map(list, zip(*board))
        for i, row in enumerate(tboard):
            last = False
            for j, c in enumerate(row):
                if c == 'X':
                    if last == True:
                        count = count - 1
                    last = True
                else:
                    last = False
        return count

# board=[
#     'X..X',
#     'X...',
#     'X.XX']
# a=Solution()
# b=a.countBattleships(board)
# print(b)
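# Editor's sketch (an alternative, not the author's method): the standard
# single-pass variant counts a ship only at its top-left cell, i.e. an 'X'
# with no 'X' directly above or to the left.
def count_battleships_one_pass(board):
    count = 0
    for i, row in enumerate(board):
        for j, c in enumerate(row):
            if c == 'X' and (i == 0 or board[i - 1][j] != 'X') \
                    and (j == 0 or board[i][j - 1] != 'X'):
                count += 1
    return count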
[ "xinwojiushiwokun@163.com" ]
xinwojiushiwokun@163.com
0064260f97e65d9800bf0c9d5c2dc45eaf5ca44f
e168a4de5e6857bb70e2b3abb5782a6b9940dcd5
/convert_data.py
ae736d95cdbfdbef91ee92cbb65b6a3b1ccb2acb
[]
no_license
cuulee/datashader_powergrid
0d96f2efb9cce8ad43d7e0fbf4e8035ef805e438
da76653982b803be05a069796f5ebba614103db8
refs/heads/main
2023-07-16T02:24:10.305312
2021-09-01T03:18:21
2021-09-01T03:18:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,006
py
# ========== (c) JP Hwang 28/6/21  ==========

import logging
import pandas as pd
import numpy as np
from utils import ll2en

logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
root_logger.addHandler(sh)

desired_width = 320
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', desired_width)

import geopandas as gpd
from spatialpandas import GeoDataFrame

trans_lines = gpd.read_file('temp/shp/Transmission_Lines.shp', encoding='utf8')

# ==============================
# Convert to spatialpandas DF
# ==============================
df = GeoDataFrame(trans_lines)
df = df.assign(bounds=df["geometry"].apply(lambda x: x.to_shapely().bounds))
df = df.assign(lon_a=df["bounds"].apply(lambda x: x[0]))
df = df.assign(lat_a=df["bounds"].apply(lambda x: x[1]))
df = df.assign(lon_b=df["bounds"].apply(lambda x: x[2]))
df = df.assign(lat_b=df["bounds"].apply(lambda x: x[3]))
df = df.drop("bounds", axis=1)
df = df.assign(LAT=df["lat_a"])
df = df.assign(LON=df["lon_a"])

vals_en = np.array(ll2en(df[["LON", "LAT"]].values))  # Add easting/northing dims
df = df.assign(x_en=vals_en[:, 0])
df = df.assign(y_en=vals_en[:, 1])

df = df.assign(wire_type="UNKNOWN")
df.loc[df["TYPE"].str.contains("OVERHEAD"), "wire_type"] = "OVERHEAD"
df.loc[df["TYPE"].str.contains("UNDERGROUND"), "wire_type"] = "UNDERGROUND"

# ==============================
# Convert LL to EN
# ==============================
from spatialpandas import GeoSeries

df = df.assign(geometry_ll=df["geometry"])
en_geom = trans_lines["geometry"].to_crs("EPSG:3395")
df = df.assign(geometry=GeoSeries(en_geom))

df.to_parquet("data/grid/Transmission_Lines_proc.parq")

df = df[["TYPE", "STATUS", 'OWNER', 'VOLTAGE', 'VOLT_CLASS', "SHAPE_Leng",
         'geometry', 'lon_a', 'lat_a', 'lon_b', 'lat_b', 'LAT', 'LON', 'x_en', 'y_en']]
df.to_parquet("data/grid/Transmission_Lines_proc_sm.parq")

import dask.dataframe as dd

ddf = dd.from_pandas(df, npartitions=2)
ddf_packed = ddf.pack_partitions(npartitions=2)
ddf_packed.to_parquet('temp/Transmission_Lines_proc_sm_packed.parq')

# Convert solar/wind data
solar_df = pd.read_csv("data/Power_Plants_Solar.csv")
vals_en = np.array(ll2en(solar_df[["Longitude", "Latitude"]].values))
solar_df = solar_df.assign(x_en=vals_en[:, 0])
solar_df = solar_df.assign(y_en=vals_en[:, 1])
solar_df = solar_df.assign(LAT=solar_df["Latitude"])
solar_df = solar_df.assign(LON=solar_df["Longitude"])
solar_df.to_csv("data/Power_Plants_Solar_proc.csv")

wind_df = pd.read_csv("data/Power_Plants_Wind.csv")
vals_en = np.array(ll2en(wind_df[["Longitude", "Latitude"]].values))
wind_df = wind_df.assign(x_en=vals_en[:, 0])
wind_df = wind_df.assign(y_en=vals_en[:, 1])
wind_df = wind_df.assign(LAT=wind_df["Latitude"])
wind_df = wind_df.assign(LON=wind_df["Longitude"])
wind_df.to_csv("data/Power_Plants_Wind_proc.csv")
[ "me@jphwang.com" ]
me@jphwang.com
a92c16f8a77e9889f0009e23aadbe1864b874b51
331cae3f7f22e07d436a0c6fe8607ff997c589f6
/qmhub/qmmm.py
7efe1d5fcc09e26ef20b3a97fad4b55812cdf2b1
[ "MIT", "LicenseRef-scancode-warranty-disclaimer" ]
permissive
evohringer/QMHub
b368389e8317a42705aa2da865126dc8d98390b8
e85ada102fb3500316058af51e8de1cf0ae35aac
refs/heads/master
2020-04-09T08:31:18.200601
2018-03-02T20:32:23
2018-03-02T20:32:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,530
py
import os
import sys

import numpy as np

from . import embedtools
from . import mmtools
from . import qmtools


class QMMM(object):
    def __init__(self, fin=None, baseDir=None, mmSoftware=None,
                 qmCharge=None, qmMult=None, qmSoftware=None,
                 qmEmbedNear=None, qmEmbedFar=None, qmElement=None,
                 qmRefCharge=True, qmSwitchingType='shift',
                 qmCutoff=None, qmSwdist=None, qmReadGuess=False,
                 postProc=False):
        """
        Create a QMMM object.
        """
        self.baseDir = baseDir
        self.mmSoftware = mmSoftware
        self.qmSoftware = qmSoftware
        self.qmCharge = qmCharge
        self.qmMult = qmMult
        self.qmEmbedNear = qmEmbedNear
        self.qmEmbedFar = qmEmbedFar
        self.qmElement = qmElement
        self.qmRefCharge = qmRefCharge
        self.qmSwitchingType = qmSwitchingType
        self.qmCutoff = qmCutoff
        self.qmSwdist = qmSwdist
        self.qmReadGuess = qmReadGuess
        self.postProc = postProc

        # Initialize the system
        if self.mmSoftware is None:
            self.mmSoftware = sys.argv[2]
        if fin is None:
            fin = sys.argv[1]

        self.system = mmtools.choose_mmtool(self.mmSoftware)(fin)

        if self.qmElement is not None:
            self.system.qm_atoms.element = np.asarray(self.qmElement)

        if self.qmCharge is None:
            self.qmCharge = self.system.qm_charge
        if self.qmMult is None:
            self.qmMult = self.system.qm_mult

        # Set up embedding scheme
        self.embed = embedtools.choose_embedtool(self.qmEmbedNear, self.qmEmbedFar)(
            self.system, self.qmRefCharge,
            self.qmSwitchingType, self.qmCutoff, self.qmSwdist)

        # Initialize the QM system
        if self.baseDir is None:
            self.baseDir = os.path.dirname(os.path.abspath(fin)) + "/"

        self.qm = qmtools.choose_qmtool(self.qmSoftware)(
            self.baseDir, self.embed, self.qmCharge, self.qmMult)

        if self.qmReadGuess and not self.system.step == 0:
            self.qm.read_guess = True
        else:
            self.qm.read_guess = False

        if self.postProc:
            self.qm.calc_forces = False
        else:
            self.qm.calc_forces = True

    def run_qm(self, **kwargs):
        """Run QM calculation."""
        self.qm.get_qm_params(**kwargs)
        self.qm.gen_input()
        self.qm.run()

        if self.qm.exitcode != 0:
            sys.exit(self.qm.exitcode)

    def dry_run_qm(self, **kwargs):
        """Generate input file without running QM calculation."""
        if self.postProc:
            self.qm.get_qm_params(**kwargs)
            self.qm.gen_input()
        else:
            raise ValueError("dryrun_qm() can only be used with postProc=True.")

    def parse_output(self):
        """Parse the output of QM calculation."""
        if self.postProc:
            pass
        elif hasattr(self.qm, 'exitcode'):
            if self.qm.exitcode == 0:
                self.system.parse_output(self.qm)
                self.system.apply_corrections(self.embed)
            else:
                raise ValueError("QM calculation did not finish normally.")
        else:
            raise ValueError("Need to run_qm() first.")

    def save_results(self):
        """Save the results of QM calculation to file."""
        if hasattr(self.system, 'qm_energy'):
            self.system.save_results()
        else:
            raise ValueError("Need to parse_output() first.")

    def save_charges(self):
        """Save the QM and MM charges to file (for debugging only)."""
        system_scale = np.ones(self.system.n_atoms)
        system_dij_min = np.zeros(self.system.n_atoms)
        system_charge = np.zeros(self.system.n_atoms)

        system_scale.flat[self.system.mm_atoms.real_atoms.index] = self.embed.charge_scale
        system_dij_min[self.system.mm_atoms.real_atoms.index] = self.system.mm_atoms.real_atoms.dij_min
        system_charge[self.system.mm_atoms.real_atoms.index] = self.system.mm_atoms.real_atoms.charge
        system_charge.flat[self.embed.mm_atoms_near.real_atoms.index] = self.embed.mm_atoms_near.charge_near
        system_charge[self.system.qm_atoms.real_atoms.index] = self.system.qm_atoms.real_atoms.charge

        np.save(self.baseDir + "system_scale", system_scale)
        np.save(self.baseDir + "system_dij_min", system_dij_min)
        np.save(self.baseDir + "system_charge", system_charge)
[ "panxl@users.noreply.github.com" ]
panxl@users.noreply.github.com
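A minimal usage sketch for the QMMM driver above, not part of the original file. The input filename, software tags, and import path are illustrative assumptions; the real values depend on what mmtools.choose_mmtool and qmtools.choose_qmtool accept:

from qmmm import QMMM  # hypothetical import path for the module above

qmmm = QMMM(fin="system.inp",    # assumed MM input file
            mmSoftware="namd",   # assumed tag understood by mmtools.choose_mmtool
            qmSoftware="qchem",  # assumed tag understood by qmtools.choose_qmtool
            qmCharge=0, qmMult=1)
qmmm.run_qm()        # generates the QM input, runs it, exits on failure
qmmm.parse_output()  # pulls energies/forces back and applies embedding corrections
qmmm.save_results()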
1e398ac9d85fbf1aa907335a011290c72f6e74d7
506f2babfe7000511892e843a231312110fe5d2b
/backend/noisy_fog_20154/settings.py
d5727a11b00198e2ea82f5f169abc5331e537076
[]
no_license
crowdbotics-apps/noisy-fog-20154
6ecff3404ba6c6ff7871ae65ad1a7c841ad49d6e
8ba11c1d964e8c9fc61501c0ec1e39ec965f4e3a
refs/heads/master
2022-12-10T21:28:39.843546
2020-09-09T21:40:58
2020-09-09T21:40:58
294,235,835
0
0
null
null
null
null
UTF-8
Python
false
false
5,901
py
""" Django settings for noisy_fog_20154 project. Generated by 'django-admin startproject' using Django 2.2.2. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os import environ env = environ.Env() # SECURITY WARNING: don't run with debug turned on in production! DEBUG = env.bool("DEBUG", default=False) # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = env.str("SECRET_KEY") ALLOWED_HOSTS = env.list("HOST", default=["*"]) SITE_ID = 1 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False) # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites' ] LOCAL_APPS = [ 'home', 'users.apps.UsersConfig', ] THIRD_PARTY_APPS = [ 'rest_framework', 'rest_framework.authtoken', 'rest_auth', 'rest_auth.registration', 'bootstrap4', 'allauth', 'allauth.account', 'allauth.socialaccount', 'allauth.socialaccount.providers.google', 'django_extensions', 'drf_yasg', # start fcm_django push notifications 'fcm_django', # end fcm_django push notifications ] INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'noisy_fog_20154.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'noisy_fog_20154.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } if env.str("DATABASE_URL", default=None): DATABASES = { 'default': env.db() } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' 
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware'] AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'allauth.account.auth_backends.AuthenticationBackend' ) STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles") STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static') ] STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' # allauth / users ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_AUTHENTICATION_METHOD = 'email' ACCOUNT_USERNAME_REQUIRED = False ACCOUNT_EMAIL_VERIFICATION = "mandatory" ACCOUNT_CONFIRM_EMAIL_ON_GET = True ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True ACCOUNT_UNIQUE_EMAIL = True LOGIN_REDIRECT_URL = "users:redirect" ACCOUNT_ADAPTER = "users.adapters.AccountAdapter" SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter" ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True) SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True) REST_AUTH_SERIALIZERS = { # Replace password reset serializer to fix 500 error "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer", } REST_AUTH_REGISTER_SERIALIZERS = { # Use custom serializer that has no username and matches web signup "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer", } # Custom user model AUTH_USER_MODEL = "users.User" EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net") EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "") EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "") EMAIL_PORT = 587 EMAIL_USE_TLS = True # start fcm_django push notifications FCM_DJANGO_SETTINGS = { "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "") } # end fcm_django push notifications # Swagger settings for api docs SWAGGER_SETTINGS = { "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info", } if DEBUG: # output email to console instead of sending EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
[ "team@crowdbotics.com" ]
team@crowdbotics.com
32c278c7dbcdb628504fa6dff36eef7568cc9379
80c448b985262408a05b10d2d03c38ef52947642
/django/django-kong-admin-0.4.0/kong_admin/migrations_bak/migrations/0013_auto_20160422_0058.py
24326817e1449863cd51a13008019541d2b7f903
[]
no_license
BUPT902/API
93e55a447555d3372f6c419b5dbc7425852ddaa7
66742e102f83fe2fb016389dce3b9bc72925b234
refs/heads/master
2016-09-13T16:13:28.651865
2016-06-02T01:18:30
2016-06-02T01:18:30
57,945,363
0
0
null
null
null
null
UTF-8
Python
false
false
660
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('kong_admin', '0012_auto_20160421_0758'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='apireference',
            options={'verbose_name': 'API', 'verbose_name_plural': 'API'},
        ),
        migrations.AddField(
            model_name='apireference',
            name='owner',
            field=models.OneToOneField(related_name='infos', default=None, to='kong_admin.ConsumerReference', help_text='API\u6240\u5c5e\u4eba'),
        ),
    ]
[ "ximu470791413@163.com" ]
ximu470791413@163.com
340140def38bdae76f2b9b03a19b62b8b6e7d141
a30166ac71e4b1c1e07d67d07a07c99b12811005
/Algoritmos/Lista 02 - Estrutura Condicional/QUESTAO05.py
d65d568526f0ade21a943feac1ba58549e0e42d2
[]
no_license
WalterBluman/Tecnologo-ADS
3615422a36d4b3169f7534601c8bbc9abe25f1ef
cebf94e482fa0e348aeda0f66b34ca3f26a2aa27
refs/heads/master
2020-12-03T19:15:28.025905
2018-07-18T18:22:54
2018-07-18T18:22:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
597
py
# -*- coding: utf-8 -*-

def ordenar(n1, n2, n3):
    # return the three numbers in ascending order
    # (use <= so that ties are handled as well)
    if n1 <= n2 <= n3:
        return n1, n2, n3
    if n3 <= n2 <= n1:
        return n3, n2, n1
    if n2 <= n1 <= n3:
        return n2, n1, n3
    if n2 <= n3 <= n1:
        return n2, n3, n1
    if n1 <= n3 <= n2:
        return n1, n3, n2
    return n3, n1, n2


def main():
    print("enter three numbers")
    num1 = float(input("enter the first number: "))
    num2 = float(input("enter the second number: "))
    num3 = float(input("enter the third number: "))
    resultado = ordenar(num1, num2, num3)
    print(*resultado)


if __name__ == '__main__':
    main()
[ "sywrahgabriella@hotmail.com" ]
sywrahgabriella@hotmail.com
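The chain of comparisons above can also be collapsed to Python's built-in sorted(), which orders any mix of ints and floats and handles ties; a minimal equivalent sketch:

def ordenar_sorted(n1, n2, n3):
    # sorted() returns a new list in ascending order
    return tuple(sorted((n1, n2, n3)))

print(ordenar_sorted(3.0, 1.0, 2.0))  # (1.0, 2.0, 3.0)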
ba98858a633b7ec3ed95ab0a7dd9eb63ec46eb64
cd70537839d0ffea8d42666984b037e6d43328b1
/98_Validate_Binary_Search_Tree_Recursive_min_max.py
fb8aa7d1c1d67a32a3792b0ead5e209831b59f66
[]
no_license
Suyash906/Trees-1
a59a7136ed1816cb5bdcfa11a84a7614a853b765
8ab324e91ef7d51183f04b647fe6f461b1f96b1a
refs/heads/master
2022-11-09T00:49:28.993328
2020-06-27T19:31:25
2020-06-27T19:31:25
271,968,459
0
0
null
2020-06-13T08:17:10
2020-06-13T08:17:09
null
UTF-8
Python
false
false
1,325
py
# Time Complexity : O(n) [n = number of nodes in the tree]
# Space Complexity : O(log n) [recursion stack, i.e. the height of the tree]
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : No
#
# Problem approach
# 1. For any node, all nodes in its left subtree have lesser values and all nodes in its right subtree have greater values
# 2. Using this property we can start from the root and pass along the min_val and max_val bounds for each left-child and right-child recursive call
# 3. The node value must be greater than min_val and lesser than max_val
#    3.1 For the left-child recursive call, max_val is updated to node.val
#    3.2 For the right-child recursive call, min_val is updated to node.val

class Solution:
    def isValidBSTUtil(self, root, min_val, max_val):
        # Base case
        if not root:
            return True
        if root.val > min_val and root.val < max_val \
                and self.isValidBSTUtil(root.left, min_val, root.val) \
                and self.isValidBSTUtil(root.right, root.val, max_val):
            return True
        return False

    def isValidBST(self, root: 'TreeNode') -> bool:
        if not root:
            return True
        min_val = float('-inf')
        max_val = float('inf')
        return self.isValidBSTUtil(root, min_val, max_val)
[ "suyash90682711@gmail.com" ]
suyash90682711@gmail.com
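LeetCode supplies TreeNode at judge time; for a local run, a minimal harness under the usual node-class convention (the two example trees are made up):

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

#  valid:   2        invalid:   5
#          / \                 / \
#         1   3               1   4
#                                / \
#                               3   6   (3 in the right subtree violates min_val=5)
valid = TreeNode(2, TreeNode(1), TreeNode(3))
invalid = TreeNode(5, TreeNode(1), TreeNode(4, TreeNode(3), TreeNode(6)))
s = Solution()
assert s.isValidBST(valid) is True
assert s.isValidBST(invalid) is False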
42a078875f0a2bc616bfb980fcc186c24442adb8
60326067f843ba99a39b7aa326800fad5391e966
/translate-layer/word2root/source.py
99e6ef4a9eb307ff112497e58000233ea39395e7
[]
no_license
alpham/draft
be9751c94d6806bd35a713c48e36ff0cb80cb7f4
825520a41797e764868f34497b6845a81922b0fa
refs/heads/master
2016-08-04T21:33:31.652048
2014-08-05T13:18:17
2014-08-05T13:18:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
644
py
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
from nltk.stem.isri import *
import test_ui


class test(QMainWindow, test_ui.Ui_MainWindow):
    def __init__(self, parent=None):
        self.ui = test_ui.Ui_MainWindow()
        QMainWindow.__init__(self, parent)
        self.ui.setupUi(self)
        self.setupConnections()

    def setupConnections(self):
        QObject.connect(self.ui.retrieve, SIGNAL('clicked()'), self.retrieve_function)

    def retrieve_function(self):
        word = self.ui.word.text()
        stemmer = ISRIStemmer()
        stemmed = stemmer.stem(unicode(word))
        self.ui.root.setText(stemmed)
[ "ahmed.magdy@code95.com" ]
ahmed.magdy@code95.com
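The Qt window above only wraps a single NLTK call; the root extraction can be exercised without a GUI. A minimal sketch (the sample word is an illustrative choice, and the printed root is the expected ISRI output, not verified here):

from nltk.stem.isri import ISRIStemmer

stemmer = ISRIStemmer()
word = u"يكتبون"            # "they write"
print(stemmer.stem(word))  # expected root: كتب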
b6bf5f11fe8c566b8c422ab37b536933ef9a23a2
d5711bf1c0306a6d9e3de223f65e3eb4154eb6dd
/python/function.py
a72192ab70f31ded38d97d998d335ef0b4e8291e
[]
no_license
zyl-hub/wiki
12c369bf7a9d711d5102b0b93c7cf17966e95be6
448d4bfcae769fb34898359b89b49893f84df8f8
refs/heads/master
2023-03-14T02:00:09.081009
2021-03-03T15:00:56
2021-03-03T15:00:56
291,210,727
0
0
null
null
null
null
UTF-8
Python
false
false
541
py
import math

# help(abs)

def my_abs(x):
    if not isinstance(x, (int, float)):
        raise TypeError('bad operand type')
    if x >= 0:
        return x
    else:
        return -x

b = my_abs(-2)
# raise error
# my_abs('a')
print(b)

# a function that does nothing
def nop():
    pass

# returns a tuple
def move(x, y, step, angle=0):
    nx = x + step * math.cos(angle)
    ny = y + step * math.sin(angle)
    return nx, ny

print(move(12, 12, 1, math.pi / 6))

def power(x, n):
    s = 1
    while n > 0:
        n = n - 1
        s = s * x
    return s
[ "1016076582@qq.com" ]
1016076582@qq.com
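One more default-argument note that fits the tutorial above (angle=0 is safe because ints are immutable): a mutable default such as a list is created once at definition time, not once per call. A small sketch:

def append_bad(item, bucket=[]):     # one shared list across all calls
    bucket.append(item)
    return bucket

def append_good(item, bucket=None):  # idiomatic fix: default to None
    if bucket is None:
        bucket = []
    bucket.append(item)
    return bucket

append_bad(1); print(append_bad(2))    # [1, 2] -- state leaked between calls
append_good(1); print(append_good(2))  # [2]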
6168ea1002c7c18658f9118b3a9c3059ff235830
0fcf311feebd5887d9566fcd36e1095ae57ac653
/csv_output.py
8560a6e9e7411dba8c0df387f865aa8b235d231d
[]
no_license
kwask96/scrape-financial-statement
dfb8c95646cc7a2b745dc801978f1af1df569c5a
832241153da55c5c08249342251bb8454594e1a7
refs/heads/master
2023-03-18T09:40:45.251170
2017-05-17T00:56:08
2017-05-17T00:56:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
861
py
import csv


def to_csv(data):
    '''
    data: (symbol: string, statement_type: string, matrix: 2d-list)
    return: None
    side effect: write "Symbol_statement_type.csv" to file system
    '''
    symbol, statement_type, matrix = data
    filename = symbol + '_' + statement_type.replace(' ', '_') + '.csv'

    # Insert symbol and statement type
    matrix.insert(0, [symbol + " - " + statement_type + " Statement"])
    matrix.insert(2, [''])

    with open(filename, 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for row in matrix:
            if len(row) > 1:
                spamwriter.writerow(row)
            elif len(row) == 1:
                # A section header/footer
                spamwriter.writerow([])
                spamwriter.writerow(row)
[ "domingohui@gmail.com" ]
domingohui@gmail.com
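A minimal invocation sketch for to_csv; the ticker and the rows are made-up sample data, not output from any real scraper:

matrix = [
    ["Fiscal year ends in December.", "2015", "2016"],
    ["Revenue", "100", "120"],
    ["Operating section"],          # single-element rows become section breaks
    ["Net income", "10", "15"],
]
to_csv(("AAPL", "Income", matrix))  # writes AAPL_Income.csv in the working directory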
f663626494fad3636baf7dca7e74d0fe4fe4fbd4
229181f4059d7f0a18baa11616b56551930f5a29
/backendGit/models/group.py
4de133e4de2479688d90d8c78fab354e94147e94
[]
no_license
antoniomanzanom/GroupFinderPublic
760690662d7a755551ea699a1a972e092f2edc1c
5ade1b0027c6f7e2279bbb77d2869bcf8e5c501f
refs/heads/master
2023-05-12T06:39:36.602481
2021-06-11T14:03:19
2021-06-11T14:03:19
375,653,281
0
0
null
null
null
null
UTF-8
Python
false
false
1,459
py
from db import db


class GroupModel(db.Model):
    __tablename__ = 'groups'

    # Group attributes
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True)
    descripcion = db.Column(db.String(100), nullable=False)
    privado = db.Column(db.Boolean(), nullable=True)
    mazmorra = db.Column(db.String(100), nullable=False)
    dificultad = db.Column(db.String(100), nullable=False)
    roles = db.Column(db.String(200), nullable=True)
    participantes = db.relationship('UserModel', backref='group', cascade="delete")

    def __init__(self, name, descripcion, mazmorra, dificultad):
        self.name = name
        self.descripcion = descripcion
        self.mazmorra = mazmorra
        self.dificultad = dificultad

    def json(self):
        return {
            'id': self.id,
            'name': self.name,
            'descripcion': self.descripcion,
            'mazmorra': self.mazmorra,
            'dificultad': self.dificultad,
            'roles': self.roles
        }

    # Helper methods for the SQLAlchemy ORM
    def save_to_db(self):
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def find_by_name(cls, name):
        return cls.query.filter_by(name=name).first()

    @classmethod
    def find_by_id(cls, _id):
        return cls.query.filter_by(id=_id).first()
[ "amanzanom98@gmail.com" ]
amanzanom98@gmail.com
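A sketch of how the ORM helpers above are meant to be called; it assumes a Flask application context with the SQLAlchemy instance from db already bound, which this file does not set up, and the field values are made up:

group = GroupModel(name="Raiders", descripcion="weekly run",
                   mazmorra="Karazhan", dificultad="heroic")
group.save_to_db()                          # INSERT + COMMIT

found = GroupModel.find_by_name("Raiders")  # returns the row or None
print(found.json())
found.delete_from_db()                      # DELETE + COMMIT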
c796d1cf2c8148c5af06566916762a2b44e34ab6
4a865273b94fc1c1fa70c4c8dbc34d57f3e4d602
/airflow-dag/product_category_name_translation_test.py
0179e7ee115159dd1033fecd421d685000830546
[]
no_license
edwardmartinsjr/pucminas-bia-tcc
13b9e1c05f27a0be3906a8e4f8beeafeeacf7520
f6d108c56a66d8737c72613632eacf8ab038997e
refs/heads/main
2023-03-27T21:23:52.694519
2021-03-30T21:40:29
2021-03-30T21:40:29
338,130,280
0
0
null
null
null
null
UTF-8
Python
false
false
848
py
import unittest
from unittest.mock import patch, mock_open
import glob
from datetime import datetime
import os

import product_category_name_translation as task
from helpers import storage


# START TESTS:
class Test(unittest.TestCase):
    db_name = os.getenv('MYSQL_NAME')
    table_name = 'product_category_name_translation'
    file_full_path = './airflow-dag/dummy_files/' + table_name + '.csv'

    def test_clear_db_func(self):
        self.assertEqual(task.clear_db_func(self.db_name + '.' + self.table_name), True)

    def test_etl(self):
        df = storage.extract_data_from_csv(self.file_full_path)
        self.assertEqual(len(df.index), 11)
        df = task.transform_data_func(df)
        self.assertEqual(len(df.index), 10)
        self.assertEqual(storage.load_data_into_db(df, self.db_name, self.table_name), True)


unittest.main()
[ "edwardmartinsjr@gmail.com" ]
edwardmartinsjr@gmail.com
9f172a0be7b6bf7913fb68633dc6687740df64a5
c376e2a22ff855447f444492aa064f5fb6db4df7
/github/models.py
faf395c5874f693a1894aa9cb74e23368c9a6714
[]
no_license
chriscauley/homer-test
54f0192fb7923e621f1e3a11046e4580b8d966b4
7194d7aaa08888c772dcf922d16a666a94bf6d48
refs/heads/master
2021-01-10T13:38:23.065811
2015-10-13T23:15:01
2015-10-13T23:15:01
44,202,532
0
0
null
null
null
null
UTF-8
Python
false
false
972
py
from django.conf import settings
from django.db import models
import requests, json

GITHUB_QS = "?client_id={}&client_secret={}".format(settings.GITHUB_KEY, settings.GITHUB_SECRET)


class Repository(models.Model):
    username = models.CharField(max_length=64)
    reponame = models.CharField(max_length=64)
    stars = models.IntegerField(default=0)
    watchers = models.IntegerField(default=0)
    forks = models.IntegerField(default=0)

    __unicode__ = lambda self: "%s/%s" % (self.username, self.reponame)

    def update(self):
        url = 'https://api.github.com/repos/{}'.format(unicode(self))
        url += GITHUB_QS
        request = requests.get(url)
        data = json.loads(request.text)
        self.stars = data['stargazers_count']
        self.watchers = data['subscribers_count']
        self.forks = data['forks']

    def save(self, *args, **kwargs):
        if not self.pk:
            self.update()
        super(Repository, self).save(*args, **kwargs)

    class Meta:
        unique_together = ('username', 'reponame')
[ "chris@lablackey.com" ]
chris@lablackey.com
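A usage sketch for the Repository model above; the first save() triggers one call to the GitHub API via update(), so settings.GITHUB_KEY/GITHUB_SECRET must be configured, and the repo name below is only an example. Meant for a configured Django shell, not a standalone script:

repo = Repository(username="django", reponame="django")
repo.save()                    # no pk yet -> update() fetches live counts
print(repo.stars, repo.forks)  # whatever api.github.com reported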
fa604d1429ac58cdead80ef403a2aedc50358742
958e14811c8460b106d58494b7f06fbf6d05a19b
/Transducer Controller/customVal.py
2cb57508b2a65ec4659dbf2848b7653d65d5174f
[]
no_license
EllangoK/Somewhat-Secret-Summer-Project
28693fbaeba9fdc6beed909277aac811041c693d
79747ed68f0288005619a2a691b5516f113399e2
refs/heads/master
2020-12-02T22:17:23.868752
2017-07-25T12:39:59
2017-07-25T12:39:59
96,108,678
6
0
null
null
null
null
UTF-8
Python
false
false
1,005
py
from tkinter import *
import sys

root = Tk()

LABELS = {
    "inch": "Custom Value in Inches",
    "mm": "Custom Value in Millimeters",
    "degree": "Custom Value in Degrees",
}


def customVal(unit):
    # unit: one of "inch", "mm" or "degree"
    def getCustomAmt():
        global customAmt
        customAmt = int(E1.get())
        root.destroy()

    if unit not in LABELS:
        sys.exit()

    submit = Button(root, text="Submit", command=getCustomAmt)
    label1 = Label(root, text=LABELS[unit])
    E1 = Entry(root, bd=5)
    label1.pack()
    E1.pack()
    submit.pack(side=BOTTOM)
    root.mainloop()
    return customAmt
[ "ellangok@bxscience.edu" ]
ellangok@bxscience.edu
bb1c785b5b1e2bf907546ba86b29d443cce9d162
6251392880ef6ef95962e76cf80b520386ef60ca
/more_ui.py
9fe68d620c56030b683d19e6722986589c043707
[]
no_license
Kartik1801/VOCA-Vocabulary-Building-Tool
207d5d3ade3f9ca96392b6d5785de6430bd337ee
f9c29b254ab9f953e7cd095cce8e2620ca50690d
refs/heads/main
2023-03-25T16:37:35.064192
2021-03-24T03:11:28
2021-03-24T03:11:28
349,098,683
0
0
null
null
null
null
UTF-8
Python
false
false
3,878
py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'more_menu.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_menu_window(object): def setupUi(self, menu_window): menu_window.setObjectName("menu_window") menu_window.resize(200, 250) menu_window.setMinimumSize(QtCore.QSize(200, 250)) menu_window.setMaximumSize(QtCore.QSize(200, 250)) self.centralwidget = QtWidgets.QWidget(menu_window) self.centralwidget.setObjectName("centralwidget") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(0, 0, 200, 51)) self.label.setStyleSheet("color: rgb(255, 255, 255);\n" "font: 75 italic 20pt \"Times New Roman\";\n" "background-color: rgb(85, 0, 127);") self.label.setAlignment(QtCore.Qt.AlignCenter) self.label.setObjectName("label") self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget) self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 50, 201, 201)) self.verticalLayoutWidget.setObjectName("verticalLayoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.translator_button = QtWidgets.QPushButton(self.verticalLayoutWidget) self.translator_button.setEnabled(True) self.translator_button.setMinimumSize(QtCore.QSize(0, 40)) self.translator_button.setStyleSheet("background-color: rgb(255, 170, 0);\n" "font: 18pt \"Times New Roman\";\n" "color: rgb(255, 255, 255);") self.translator_button.setObjectName("translator_button") self.verticalLayout.addWidget(self.translator_button) self.Article_button = QtWidgets.QPushButton(self.verticalLayoutWidget) self.Article_button.setMinimumSize(QtCore.QSize(0, 40)) self.Article_button.setStyleSheet("background-color: rgb(255, 170, 0);\n" "font: 18pt \"Times New Roman\";\n" "color: rgb(255, 255, 255);") self.Article_button.setObjectName("Article_button") self.verticalLayout.addWidget(self.Article_button) self.Chat_button = QtWidgets.QPushButton(self.verticalLayoutWidget) self.Chat_button.setMinimumSize(QtCore.QSize(0, 40)) self.Chat_button.setStyleSheet("background-color: rgb(255, 170, 0);\n" "font: 18pt \"Times New Roman\";\n" "color: rgb(255, 255, 255);") self.Chat_button.setObjectName("Chat_button") self.verticalLayout.addWidget(self.Chat_button) self.Exit_button = QtWidgets.QPushButton(self.verticalLayoutWidget) self.Exit_button.setMinimumSize(QtCore.QSize(0, 40)) self.Exit_button.setStyleSheet("background-color: rgb(255, 170, 0);\n" "font: 18pt \"Times New Roman\";\n" "color: rgb(255, 255, 255);") self.Exit_button.setObjectName("Exit_button") self.verticalLayout.addWidget(self.Exit_button) menu_window.setCentralWidget(self.centralwidget) self.retranslateUi(menu_window) QtCore.QMetaObject.connectSlotsByName(menu_window) def retranslateUi(self, menu_window): _translate = QtCore.QCoreApplication.translate menu_window.setWindowTitle(_translate("menu_window", "More")) self.label.setText(_translate("menu_window", "Menu")) self.translator_button.setText(_translate("menu_window", "Translator")) self.Article_button.setText(_translate("menu_window", "Article")) self.Chat_button.setText(_translate("menu_window", "Chatbot")) self.Exit_button.setText(_translate("menu_window", "Exit")) import Main_rc
[ "kartikdhawan1801@gmail.com" ]
kartikdhawan1801@gmail.com
7ee87a0f8d74bda1e49ab34d62868a1af4f9ac09
5b78daff16dee50d3df8ffed82d155ece0ef0934
/echopy_doc.py
7f90046e0140d29d9242dc57a85ca36dca8f4298
[]
no_license
zpriddy/ZP_EchoST
9da4f78ae64cdd258354a84b7d07d934682b3ef2
2335a45c09c03238cc1e8e8fc20c8c8031a19b8d
refs/heads/master
2020-05-30T00:57:32.716491
2015-07-26T23:15:18
2015-07-26T23:15:18
39,745,746
3
3
null
null
null
null
UTF-8
Python
false
false
15,062
py
main_page=''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> <script> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-65257509-1', 'auto'); ga('send', 'pageview'); </script> </head> <nav class="navbar navbar-inverse"> <div class="container-fluid"> <!-- Brand and toggle get grouped for better mobile display --> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <a class="navbar-brand" href="https://alexa.zpriddy.com">ZP Alexa Projects</a> </div> <!-- Collect the nav links, forms, and other content for toggling --> <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1"> <ul class="nav navbar-nav"> <li class="active"><a href="https://alexa.zpriddy.com">Home <span class="sr-only">(current)</span></a></li> <li><a href="#">SmartThings</a></li> <li><a href="https://alexa.zpriddy.com/nest">Nest</a></li> </ul> <ul class="nav navbar-nav navbar-right"> <li><a href="https://zpriddy.com">zpriddy.com</a></li> </ul> </div><!-- /.navbar-collapse --> </div><!-- /.container-fluid --> </nav> <div class="container"> <div class="row"> <div class="col-md-2"></div> <div class="col-md-8"> <div class="jumbotron"> <h2> Help Me Out!</h2> </div> <p>Please consider helping me out so that I can keep supporting this and other Open Source projects! I run all of this out of my pocket and it doesnt all come free.. Please consider helping me out so that I can keep everything running! </p> <p><a class="btn btn-primary btn-lg" href="https://cash.me/$ZPriddy" role="button">Donate via Square Cash!</a></p> </div> <div class="col-md-2"></div> </div> <div class="container"> <div class="panel panel-default"> <div class="panel-heading"> <h3 class="panel-title">Setting up Alexa with your SmartThings</h3> </div> <div class="panel-body"> To setup your Alexa to talk to SmartThings please go to my Github linked below and follow the README instructions. There are three other files that you will need in order to complete the process (Thats why I link to Github). 
<a class="btn btn-warning" href="https://github.com/zpriddy/ZP-Echo-ST-Beta" role="button">Git Hub</a> </div> </div> <div class="panel panel-default"> <div class="panel-heading"> <h3 class="panel-title">Quick Links</h3> </div> <div class="panel-body"> <a class="btn btn-success" href="https://alexa.zpriddy.com/alexa/auth" role="button">SmartThings Alexa Auth</a> <a class="btn btn-info" href="https://alexa.zpriddy.com/alexa/samples" role="button">SmartThings Alexa Smaples</a> </div> </div> </div> </div> ''' auth_page=''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> <script> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-65257509-1', 'auto'); ga('send', 'pageview'); </script> </head> <nav class="navbar navbar-inverse"> <div class="container-fluid"> <!-- Brand and toggle get grouped for better mobile display --> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <a class="navbar-brand" href="https://alexa.zpriddy.com">ZP Alexa Projects</a> </div> <!-- Collect the nav links, forms, and other content for toggling --> <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1"> <ul class="nav navbar-nav"> <li><a href="https://alexa.zpriddy.com">Home</a></li> <li class="active"><a href="#">SmartThings<span class="sr-only">(current)</span></a></li> <li><a href="#">Nest</a></li> </ul> <ul class="nav navbar-nav navbar-right"> <li><a href="https://zpriddy.com">zpriddy.com</a></li> </ul> </div><!-- /.navbar-collapse --> </div><!-- /.container-fluid --> </nav> <div class="container"> <form action="auth" method="post"> <div class="form-group"> <label for="AlexaID">Alexa ID</label> <input id="AlexaID" name="AlexaID" type="text" class="form-control" title="Alexa ID. This is a required field"> </div> <div class="form-group"> <label for="SmartThingsClientID">SmartThings Client ID</label> <input id="SmartThingsClientID" name="SmartThingsClientID" type="text" class="form-control" title="SmartThings Client ID. This is a required field"> </div> <div class="form-group"> <label for="SmartThingsClientSecret">SmartThings Client Secret </label> <input id="SmartThingsClientSecret" name="SmartThingsClientSecret" type="text" class="form-control" title="SmartThings Client Secret . 
This is a required field"> </div> <input type="submit" value="Authorize" class="btn btn-default"> </form> </div> ''' nest_page=''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> <script> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-65257509-1', 'auto'); ga('send', 'pageview'); </script> </head> <nav class="navbar navbar-inverse"> <div class="container-fluid"> <!-- Brand and toggle get grouped for better mobile display --> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <a class="navbar-brand" href="https://alexa.zpriddy.com">ZP Alexa Projects</a> </div> <!-- Collect the nav links, forms, and other content for toggling --> <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1"> <ul class="nav navbar-nav"> <li><a href="https://alexa.zpriddy.com">Home</a></li> <li><a href="#">SmartThings</a></li> <li class="active"><a href="https://alexa.zpriddy.com/nest">Nest <span class="sr-only">(current)</span></a></li> </ul> <ul class="nav navbar-nav navbar-right"> <li><a href="https://zpriddy.com">zpriddy.com</a></li> </ul> </div><!-- /.navbar-collapse --> </div><!-- /.container-fluid --> </nav> <div class="container"> <div class="row"> <div class="col-md-2"></div> <div class="col-md-8"> <div class="jumbotron"> <h2> Help Me Out!</h2> </div> <p>Please consider helping me out so that I can keep supporting this and other Open Source projects! I run all of this out of my pocket and it doesnt all come free.. Please consider helping me out so that I can keep everything running! 
</p> <p><a class="btn btn-primary btn-lg" href="https://cash.me/$ZPriddy" role="button">Donate via Square Cash!</a></p> </div> <div class="col-md-2"></div> </div> <div class="container"> <div class="panel panel-default"> <div class="panel-heading"> <h3 class="panel-title">Setting up Nest with your SmartThings</h3> </div> <div class="panel-body"> Comming Soon </div> </div> <div class="panel panel-default"> <div class="panel-heading"> <h3 class="panel-title">Quick Links</h3> </div> <div class="panel-body"> <a class="btn btn-success" href="https://alexa.zpriddy.com/alexa/auth" role="button">SmartThings Alexa Auth</a> <a class="btn btn-info" href="https://alexa.zpriddy.com/alexa/samples" role="button">SmartThings Alexa Smaples</a> </div> </div> </div> </div> ''' samples_page=''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> <script> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-65257509-1', 'auto'); ga('send', 'pageview'); </script> </head> <nav class="navbar navbar-inverse"> <div class="container-fluid"> <!-- Brand and toggle get grouped for better mobile display --> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <a class="navbar-brand" href="https://alexa.zpriddy.com">ZP Alexa Projects</a> </div> <!-- Collect the nav links, forms, and other content for toggling --> <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1"> <ul class="nav navbar-nav"> <li><a href="https://alexa.zpriddy.com">Home</a></li> <li class="active"><a href="#">SmartThings<span class="sr-only">(current)</span></a></li> <li><a href="#">Nest</a></li> </ul> <ul class="nav navbar-nav navbar-right"> <li><a href="https://zpriddy.com">zpriddy.com</a></li> </ul> </div><!-- /.navbar-collapse --> </div><!-- /.container-fluid --> </nav> <div class="container"> <form action="samples" method="post"> <div class="form-group"> <label for="AlexaID">Alexa ID</label> <input id="AlexaID" name="AlexaID" type="text" class="form-control" title="Alexa ID. 
This is a required field"> </div> <input type="submit" value="Get Samples" class="btn btn-default"> </form> </div> ''' samples_results=''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> <script> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-65257509-1', 'auto'); ga('send', 'pageview'); </script> </head> <nav class="navbar navbar-inverse"> <div class="container-fluid"> <!-- Brand and toggle get grouped for better mobile display --> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <a class="navbar-brand" href="https://alexa.zpriddy.com">ZP Alexa Projects</a> </div> <!-- Collect the nav links, forms, and other content for toggling --> <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1"> <ul class="nav navbar-nav"> <li><a href="https://alexa.zpriddy.com">Home</a></li> <li class="active"><a href="#">SmartThings<span class="sr-only">(current)</span></a></li> <li><a href="#">Nest</a></li> </ul> <ul class="nav navbar-nav navbar-right"> <li><a href="https://zpriddy.com">zpriddy.com</a></li> </ul> </div><!-- /.navbar-collapse --> </div><!-- /.container-fluid --> </nav> <div class="container"> <h1>Sample Resultss</h1> <p> Please copy and paste the results below into your Alexa SampleUtterances in the ASK portal. </p> <div class="panel panel-primary"> <div class="panel-heading">Sample</div> <div class="panel-body" style="max-height: 10;overflow-y: scroll;">RESULTS</div> </div> </div> </div> ''' NotNestUser = {"outputSpeech": {"type":"PlainText","text":"Current user is not a valid nest user. Please look for help"},"card":{"type":"Simple","title":"Nest Control Error","content":"Current user is not a valid nest user. Please look for help"},'shouldEndSession':True}
[ "me@zpriddy.com" ]
me@zpriddy.com
5db951da7ed2b02188b05cf95f49228292dd84d8
968caaa224f3dba49053ccc5e968e30296813f86
/socket_client.py
c294d0304c221f8c3a0da26589422a2d7c68ad81
[]
no_license
k4is3r/kivy-chat-python
f6bf2eda0ed5c47f423b0ffac2fa9d15eee6e28a
926b45beac14b17cdf57c462f1b17051ce3971ce
refs/heads/master
2022-12-12T18:54:23.013987
2020-01-06T16:14:51
2020-01-06T16:14:51
232,650,507
0
0
null
2022-12-08T07:02:27
2020-01-08T20:16:57
Python
UTF-8
Python
false
false
1,808
py
import socket
from threading import Thread

HEADER_LENGTH = 10

client_socket = None


# Connects to the server
def connect(ip, port, my_username, error_callback):
    global client_socket
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client_socket.connect((ip, port))
    except Exception as e:
        error_callback('Connection error: {}'.format(str(e)))
        return False

    username = my_username.encode('utf-8')
    username_header = f"{len(username):<{HEADER_LENGTH}}".encode('utf-8')
    client_socket.send(username_header + username)
    return True


def send(message):
    message = message.encode('utf-8')
    message_header = f"{len(message):<{HEADER_LENGTH}}".encode('utf-8')
    client_socket.send(message_header + message)


def start_listening(incoming_message_callback, error_callback):
    Thread(target=listen, args=(incoming_message_callback, error_callback), daemon=True).start()


def listen(incoming_message_callback, error_callback):
    while True:
        try:
            # Each frame is a fixed-width length header followed by the payload
            username_header = client_socket.recv(HEADER_LENGTH)
            if not len(username_header):
                error_callback('Connection closed by the server')
                return
            username_length = int(username_header.decode('utf-8').strip())
            username = client_socket.recv(username_length).decode('utf-8')

            message_header = client_socket.recv(HEADER_LENGTH)
            message_length = int(message_header.decode('utf-8').strip())
            message = client_socket.recv(message_length).decode('utf-8')

            incoming_message_callback(username, message)
        except Exception as e:
            error_callback('Reading error: {}'.format(str(e)))
            return
[ "ejwimery@gmail.com" ]
ejwimery@gmail.com
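A minimal driver for the module above; the host, port, and username are illustrative, and a server speaking the same length-prefixed header protocol is assumed to be listening:

import socket_client

def on_message(username, message):
    print(f"{username} > {message}")

def on_error(message):
    print("ERROR:", message)

if socket_client.connect("127.0.0.1", 1234, "kivy_user", on_error):
    socket_client.start_listening(on_message, on_error)
    socket_client.send("hello room")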
225e4818b7f85da851001d3586dfc20f0fe2992d
8211380d35f6c553a5cc8a4043144c0afe2b66a2
/loglizer/models/InvariantsMiner.py
b352da0d47836ab163b3a7dcec7df882fe458dde
[]
no_license
real-lhj/Log-Analysis-for-Anomaly-Detection
3aa88fd16b54f726cfc9017e8599d22f686d49d3
10e5d706bdc798c44cbc508b7d9fcc2e15fbfda5
refs/heads/master
2023-03-17T06:28:12.509865
2020-02-11T01:53:17
2020-02-11T01:53:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
11,031
py
""" The implementation of Invariants Mining model for anomaly detection. Authors: LogPAI Team Reference: [1] Jian-Guang Lou, Qiang Fu, Shengqi Yang, Ye Xu, Jiang Li. Mining Invariants from Console Logs for System Problem Detection. USENIX Annual Technical Conference (ATC), 2010. """ import numpy as np from itertools import combinations from ..utils import metrics class InvariantsMiner(object): def __init__(self, percentage=0.98, epsilon=0.5, longest_invarant=None, scale_list=[1, 2, 3]): """ The Invariants Mining model for anomaly detection Attributes ---------- percentage: float, percentage of samples satisfying the condition that |X_j * V_i| < epsilon epsilon: float, the threshold for estimating the invariant space longest_invarant: int, the specified maximal length of invariant, default to None. Stop searching when the invariant length is larger than longest_invarant. scale_list: list, the list used to scale the theta of float into integer invariants_dict: dict, dictionary of invariants where key is the selected columns and value is the weights the of invariant """ self.percentage = percentage self.epsilon = epsilon self.longest_invarant = longest_invarant self.scale_list = scale_list self.invariants_dict = None def fit(self, X): """ Arguments --------- X: ndarray, the event count matrix of shape num_instances-by-num_events """ print('====== Model summary ======') invar_dim = self._estimate_invarant_space(X) self._invariants_search(X, invar_dim) def predict(self, X): """ Predict anomalies with mined invariants Arguments --------- X: the input event count matrix Returns ------- y_pred: ndarray, the predicted label vector of shape (num_instances,) """ y_sum = np.zeros(X.shape[0]) for cols, theta in self.invariants_dict.items(): y_sum += np.fabs(np.dot(X[:, cols], np.array(theta))) y_pred = (y_sum > 1e-6).astype(int) return y_pred def evaluate(self, X, y_true): print('====== Evaluation summary ======') y_pred = self.predict(X) precision, recall, f1 = metrics(y_pred, y_true) print('Precision: {:.3f}, recall: {:.3f}, F1-measure: {:.3f}\n'.format(precision, recall, f1)) return precision, recall, f1 def _estimate_invarant_space(self, X): """ Estimate the dimension of invariant space using SVD decomposition Arguments --------- X: ndarray, the event count matrix of shape num_instances-by-num_events percentage: float, percentage of samples satisfying the condition that |X_j * V_i| < epsilon epsilon: float, the threshold for estimating the invariant space Returns ------- r: the dimension of invariant space """ covariance_matrix = np.dot(X.T, X) U, sigma, V = np.linalg.svd(covariance_matrix) # SVD decomposition # Start from the right most column of matrix V, sigular values are in ascending order num_instances, num_events = X.shape r = 0 for i in range(num_events - 1, -1, -1): zero_count = sum(abs(np.dot(X, U[:, i])) < self.epsilon) if zero_count / float(num_instances) < self.percentage: break r += 1 print('Invariant space dimension: {}'.format(r)) return r def _invariants_search(self, X, r): """ Mine invariant relationships from X Arguments --------- X: ndarray, the event count matrix of shape num_instances-by-num_events r: the dimension of invariant space """ num_instances, num_events = X.shape invariants_dict = dict() # save the mined Invariants(value) and its corresponding columns(key) search_space = [] # only invariant candidates in this list are valid # invariant of only one column (all zero columns) init_cols = sorted([[item] for item in range(num_events)]) for col in init_cols: 
search_space.append(col) init_col_list = init_cols[:] for col in init_cols: if np.count_nonzero(X[:, col]) == 0: invariants_dict[tuple(col)] = [1] search_space.remove(col) init_col_list.remove(col) item_list = init_col_list length = 2 FLAG_break_loop = False # check invariant of more columns while len(item_list) != 0: if self.longest_invarant and len(item_list[0]) >= self.longest_invarant: break joined_item_list = self._join_set(item_list, length) # generate new invariant candidates for items in joined_item_list: if self._check_candi_valid(items, length, search_space): search_space.append(items) item_list = [] for item in joined_item_list: if tuple(item) in invariants_dict: continue if item not in search_space: continue if not self._check_candi_valid(tuple(item), length, search_space) and length > 2: search_space.remove(item) continue # an item must be superset of all other subitems in searchSpace, else skip validity, scaled_theta = self._check_invar_validity(X, item) if validity: self._prune(invariants_dict.keys(), set(item), search_space) invariants_dict[tuple(item)] = scaled_theta.tolist() search_space.remove(item) else: item_list.append(item) if len(invariants_dict) >= r: FLAG_break_loop = True break if FLAG_break_loop: break length += 1 print('Mined {} invariants: {}\n'.format(len(invariants_dict), invariants_dict)) self.invariants_dict = invariants_dict def _compute_eigenvector(self, X): """ calculate the smallest eigenvalue and corresponding eigenvector (theta in the paper) for a given sub_matrix Arguments --------- X: the event count matrix (each row is a log sequence vector, each column represents an event) Returns ------- min_vec: the eigenvector of corresponding minimum eigen value FLAG_contain_zero: whether the min_vec contains zero (very small value) """ FLAG_contain_zero = False count_zero = 0 dot_result = np.dot(X.T, X) U, S, V = np.linalg.svd(dot_result) min_vec = U[:, -1] count_zero = sum(np.fabs(min_vec) < 1e-6) if count_zero != 0: FLAG_contain_zero = True return min_vec, FLAG_contain_zero def _check_invar_validity(self, X, selected_columns): """ scale the eigenvector of float number into integer, and check whether the scaled number is valid Arguments --------- X: the event count matrix (each row is a log sequence vector, each column represents an event) selected_columns: select columns from all column list Returns ------- validity: whether the selected columns is valid scaled_theta: the scaled theta vector """ sub_matrix = X[:, selected_columns] inst_num = X.shape[0] validity = False min_theta, FLAG_contain_zero = self._compute_eigenvector(sub_matrix) abs_min_theta = [np.fabs(it) for it in min_theta] if FLAG_contain_zero: return validity, [] else: for i in self.scale_list: min_index = np.argmin(abs_min_theta) scale = float(i) / min_theta[min_index] scaled_theta = np.array([round(item * scale) for item in min_theta]) scaled_theta[min_index] = i scaled_theta = scaled_theta.T if 0 in np.fabs(scaled_theta): continue dot_submat_theta = np.dot(sub_matrix, scaled_theta) count_zero = 0 for j in dot_submat_theta: if np.fabs(j) < 1e-8: count_zero += 1 if count_zero >= self.percentage * inst_num: validity = True # print('A valid invariant is found: ',scaled_theta, selected_columns) break return validity, scaled_theta def _prune(self, valid_cols, new_item_set, search_space): """ prune invalid combination of columns Arguments --------- valid_cols: existing valid column list new_item_set: item set to be merged search_space: the search space that stores possible candidates """ if 
len(valid_cols) == 0: return for se in valid_cols: intersection = set(se) & new_item_set if len(intersection) == 0: continue union = set(se) | new_item_set for item in list(intersection): diff = sorted(list(union - set([item]))) if diff in search_space: search_space.remove(diff) def _join_set(self, item_list, length): """ Join a set with itself and returns the n-element (length) itemsets Arguments --------- item_list: current list of columns length: generate new items of length Returns ------- return_list: list of items of length-element """ set_len = len(item_list) return_list = [] for i in range(set_len): for j in range(i + 1, set_len): i_set = set(item_list[i]) j_set = set(item_list[j]) if len(i_set.union(j_set)) == length: joined = sorted(list(i_set.union(j_set))) if joined not in return_list: return_list.append(joined) return_list = sorted(return_list) return return_list def _check_candi_valid(self, item, length, search_space): """ check whether an item's subitems are in searchspace Arguments --------- item: item to be checked length: the length of item search_space: the search space that stores possible candidates Returns ------- True or False """ for subItem in combinations(item, length - 1): if sorted(list(subItem)) not in search_space: return False return True
[ "davide97ls@gmail.com" ]
davide97ls@gmail.com
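A toy fit/predict sketch for the miner above, assuming the class is importable (its relative imports mean it must run inside the loglizer package). The 4-event count matrix is synthetic: every training row satisfies count(e0) == count(e1), and the second test row breaks that invariant:

import numpy as np

X_train = np.array([[1, 1, 0, 2],
                    [2, 2, 1, 0],
                    [3, 3, 2, 1],
                    [1, 1, 3, 0]])
model = InvariantsMiner(percentage=1.0)
model.fit(X_train)                 # mines a relation equivalent to x0 - x1 = 0

X_test = np.array([[4, 4, 1, 1],   # satisfies the invariant -> 0
                   [4, 2, 1, 1]])  # violates it             -> 1
print(model.predict(X_test))       # expected: [0 1]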
f5b129dfb19b7d434d24965972ae34a607146276
b31c9ec80def3ff6db8d0aaa83831e44ff4752c5
/ppc300_Score/fhq2017_ppc300_ans.py
a691f2e802ef1ceb526b40ca9888c1e6ffb3155d
[ "MIT" ]
permissive
freehackquest/2017-tasks
1cfd0d1f9b4f859124f3a61c4bfa6411cc936bdb
6f8dd3791f062664dd4d642468dad11b7024e3f6
refs/heads/master
2021-05-15T16:47:52.513738
2017-10-20T03:33:36
2017-10-20T03:33:36
107,542,583
2
0
null
null
null
null
UTF-8
Python
false
false
743
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib

f_in = open('ppc300.txt', 'r')
f_out = open('ppc300_ans.txt', 'w')

ans = 0
while 1:
    res = f_in.readline().split()
    if len(res) != 2:
        break
    n = int(res[0])
    m = int(res[1])
    print 'dano)', n, m
    res = 0
    step = m - 1
    ranges = 1
    if m == 1:
        res = n
    else:
        while 1:
            ranges += step
            if ranges > n:
                break
            res += (n - ranges) + 1
    print 'res)', res
    ans += res
    f_out.write(str(res) + '\n')

f_in.close()
f_out.close()

res = str(ans)
print 'fhq{' + str(hashlib.md5(res).hexdigest()) + '}'
# fhq{758c6a98578de0cb9fb89347e4fb0fad}
[ "mrseakg@gmail.com" ]
mrseakg@gmail.com
e6d213871acea614d96881188679c65e7d1629d2
d750e292cb7dd8c5dc82572d891b6d2355f9a6db
/PyFlow/Packages/PyflowBase/Tools/AlignTopTool.py
d8a6e2b7cb801d414d97e6894039d8b7140dae83
[ "MIT" ]
permissive
jdrese/PyFlow
68205a8f66a429891d657a00ba1e3ae91f4d5b24
3d25e95e0dacbc040ea8634ddfa723eb2d1a2a03
refs/heads/master
2020-05-26T21:39:22.358941
2019-05-23T20:16:18
2019-05-23T20:16:18
188,383,493
2
0
MIT
2019-05-24T08:27:06
2019-05-24T08:27:06
null
UTF-8
Python
false
false
695
py
from nine import str
from PyFlow.UI.Tool.Tool import ShelfTool
from PyFlow.Packages.PyflowBase.Tools import RESOURCES_DIR
from PyFlow.Core.Common import Direction

from Qt import QtGui
from Qt.QtWidgets import QFileDialog


class AlignTopTool(ShelfTool):
    """docstring for AlignTopTool."""
    def __init__(self):
        super(AlignTopTool, self).__init__()

    @staticmethod
    def toolTip():
        return "Aligns selected nodes by top most node"

    @staticmethod
    def getIcon():
        return QtGui.QIcon(RESOURCES_DIR + "aligntop.png")

    @staticmethod
    def name():
        return str("AlignTopTool")

    def do(self):
        self.canvas.alignSelectedNodes(Direction.Up)
[ "shermand07@mail.ru" ]
shermand07@mail.ru
56f13ce893b3bf651f9bc4d9640eae6d032e5cc7
6abc9b7e59aa2bc77d16bf0579bc2319db4fa20c
/miniverse/core/models.py
8bd56b62ceb2078eb06c7be9d6def6cdda7541f7
[ "MIT" ]
permissive
IQSS/old-miniverse
b05823891fafd40a5b12f18894f3dff19404fe37
daabcad2fbd6cc29cc05f0091f51157e4fe9e46a
refs/heads/master
2021-01-21T03:15:54.392430
2014-06-27T16:05:55
2014-06-27T16:05:55
19,803,423
0
2
null
null
null
null
UTF-8
Python
false
false
331
py
from django.db import models


class TimeStampedModel(models.Model):
    """
    An abstract base class model that provides self-updating
    "created" and "modified" fields.
    """
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True
[ "raman_prasad@harvard.edu" ]
raman_prasad@harvard.edu
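The abstract base is meant to be inherited, never instantiated; a sketch of a concrete model built on it (Dataset is a made-up example, not a model from this project):

class Dataset(TimeStampedModel):
    # inherits the auto-managed 'created' and 'modified' columns
    name = models.CharField(max_length=255)

# Dataset.objects.create(name="demo")  # both timestamps are filled in automatically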
276e474afa999cf3c9776d1f0b6a696987d8a34f
d51b4c766661af65b4ee6e7c30f8cb4bdd8603e3
/python/algorithm/leetcode/1206.py
ef480cffa3025c1d785bc57a77654dcb02f0c3f1
[]
no_license
yanxurui/keepcoding
3e988c76b123d55b32cf7cc35fbffb12c4ccb095
d6b9f07e2d1437681fa77fee0687ea9b83cab135
refs/heads/master
2021-01-24T09:01:41.306597
2020-05-21T05:36:04
2020-05-21T05:36:04
93,400,267
1
1
null
null
null
null
UTF-8
Python
false
false
1,698
py
# https://leetcode.com/problems/design-skiplist/discuss/393713/Python-1-node-per-value-and-100
from random import random
from math import log


class Node:
    def __init__(self, val, levels):
        self.val = val
        self.levels = [None] * levels


class Skiplist:
    def __init__(self):
        self.head = Node(-1, 16)

    def _iter(self, target):
        # Walk down from the top level, advancing while the next node is < target
        cur = self.head
        for l in range(15, -1, -1):
            while True:
                future = cur.levels[l]
                if future and target > future.val:
                    cur = future
                else:
                    break
            yield cur, l
        # cur is not None

    def search(self, target: int) -> bool:
        for cur, l in self._iter(target):
            pass
        nxt = cur.levels[0]
        return bool(nxt and nxt.val == target)

    def add(self, num: int) -> None:
        node_levels = min(16, 1 + int(log(1 / random(), 2)))
        node = Node(num, node_levels)
        for cur, l in self._iter(num):
            if l < node_levels:
                node.levels[l] = cur.levels[l]
                cur.levels[l] = node

    def erase(self, num: int) -> bool:
        rst = False
        for cur, l in self._iter(num):
            nxt = cur.levels[l]
            if nxt and nxt.val == num:
                rst = True
                cur.levels[l] = cur.levels[l].levels[l]
        return rst


# Your Skiplist object will be instantiated and called as such:
skiplist = Skiplist()
skiplist.add(1)
skiplist.add(2)
skiplist.add(3)
assert skiplist.search(0) is False
skiplist.add(4)
assert skiplist.search(1)
assert skiplist.erase(0) is False
assert skiplist.erase(1)
assert skiplist.search(1) is False
[ "617080352@qq.com" ]
617080352@qq.com
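The level formula in add() draws a geometric height: for uniform U, 1 + int(log2(1/U)) equals k with probability 2**-k, capped at 16. A quick empirical check of that distribution:

from collections import Counter
from math import log
from random import random

N = 100_000
heights = Counter(min(16, 1 + int(log(1 / random(), 2))) for _ in range(N))
for lvl in sorted(heights):
    print(lvl, round(heights[lvl] / N, 3))  # ~0.5, ~0.25, ~0.125, ...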
0ca7092994a6088b4a2f108b3e13a5b4b5c3e963
f351e226dacfb7b7894ac39e6588c90351f54b4c
/Qshop/Shop/serdMail.py
e6479ce748613f520527c0051573116a014295a2
[]
no_license
1846515672/Web-django
a25d49d5629da58683b4ed59eb7316494c007706
941866eafa6bde2bada9ad69ce12aa36ff5a5ae2
refs/heads/master
2022-12-10T11:01:31.067823
2019-11-02T09:37:10
2019-11-02T09:37:10
219,131,697
0
0
null
2022-12-04T18:37:46
2019-11-02T09:29:31
CSS
UTF-8
Python
false
false
1,781
py
import smtplib
from email.mime.text import MIMEText

content = """Aha, here I am"""  # if "plain" below is changed to "html", HTML markup can be used here
sender = "2719929303@qq.com"  # your own QQ account
recver = """
794067332@qq.com,
876911388@qq.com,
329844268@qq.com,
1985054961@qq.com,
1502377018@qq.com,
1339566602@qq.com,
15733129082@163.com,
1693580010@qq.com,
1015174363@qq.com,
1320629993@qq.com,
2816474335@qq.com,
"""  # the recipients' accounts
password = "bveikvhxwpeodgji"

# Turn the raw block above into a clean address list
recipients = [addr.strip() for addr in recver.split(",") if addr.strip()]

# Build the message
message = MIMEText(content, "plain", "utf-8")  # "plain" may be changed to "html"
message["To"] = ", ".join(recipients)
message["From"] = sender
message["Subject"] = "Guess"

# Send the mail
smtp = smtplib.SMTP_SSL("smtp.qq.com", 465)
smtp.login(sender, password)
smtp.sendmail(sender, recipients, message.as_string())
smtp.close()

# ------------------------------------------------------------------------
# def sendMail(content, email):
#     # Third-party SMTP service
#     receivers = email  # recipient address; can be your own or any other mailbox
#     subject = 'Python SMTP mail test'
#     content = """
#     If you confirm that you changed the password yourself,
#     click the link below to reset it:
#     <a href="%s">confirm via this link</a>
#     """ % content
#
#     message = MIMEText(content, 'plain', 'utf-8')
#     message['From'] = Header(MAIL_SENDER, 'utf-8')
#     message['To'] = Header(email, 'utf-8')
#     message['Subject'] = Header(subject, 'utf-8')
#
#     try:
#         smtpObj = smtplib.SMTP()
#         smtpObj.connect(MAIL_SERVER, 25)  # 25 is the SMTP port
#         smtpObj.login(MAIL_USER, MAIL_PASSWORD)
#         smtpObj.sendmail(MAIL_SENDER, email, message.as_string())
#         print("mail sent successfully")
#     except smtplib.SMTPException as e:
#         print(e)
[ "1846515672@qq.com" ]
1846515672@qq.com
65b2c4f3dbfcc617bb3578be50327d9b46b23b94
77a79db13c04c035e5f42b136bf37de2bedec52f
/LIB/build/lib/python/discretize.py
118cf036138b2fb594d0743ccbc5e6750f233dda
[ "MIT" ]
permissive
sarthak-chakraborty/CausIL
6e71ea4091c1b5bcf7aff5469926e80fc99282a1
31e2ca0fbf9f2a6e9416020f1c8cb8a6d2371f97
refs/heads/main
2023-07-08T15:35:02.861313
2023-07-03T16:08:59
2023-07-03T16:08:59
597,629,481
8
1
null
2023-02-27T05:32:56
2023-02-05T05:50:42
Python
UTF-8
Python
false
false
10,547
py
# Contributors: Siddharth Jain, Aayush Makharia, Vineet Malik, Sourav Suman, Ayush Chauhan, Gaurav Sinha # Owned by: Adobe Corporation import pandas as pd from feature_engine import discretisers as dsc from sklearn.preprocessing import KBinsDiscretizer import sys # sys.path.append('./libraries/') from libraries.caim_test import CAIMD import numpy as np from python.ci_tests import fast_conditional_ind_test as FCIT from multiprocessing import Pool from python.bnutils import one_hot_encoder from sklearn.preprocessing import StandardScaler # Implements multiple discretization techniques for continuous variables class Decision_Tree_Discretizer: """Discretizes a continuous variable using Decision tree classifier Args: score: score to be considered for discretization **kwargs: dictionary of parameters for DecisionTreeDiscretiser kwargs format with default values: {'cv': 10, 'regression': False, 'max_depth': [1,2,3], 'max_samples_leaf': [10, 4]} """ def __init__(self, score='accuracy', **kwargs): self.cv = kwargs.get('cv', 10) self.scoring = score self.regression = kwargs.get('regression', False), self.param_grid = { 'max_depth': kwargs.get('max_depth', [1, 2, 3]), 'min_samples_leaf': kwargs.get('max_samples_leaf', [10, 4]) } def fit(self, data, node, target, **kwargs): self.node = node self.disc = dsc.DecisionTreeDiscretiser(cv=self.cv, scoring=self.scoring, variables=[node], regression=False, param_grid=self.param_grid) self.disc.fit(data[[node, target]], data[target]) print(self.disc.scores_dict_[node]) return self, self.disc.scores_dict_[node] def transform(self, data): print(data) return self.disc.transform(data[[data.columns[0], data.columns[1]]])[self.node] def unsupervised_discretization(df, node_list, bins, discretization_type): """Bins continuous data into intervals. 
Args: df : pandas dataframe object wtih mixed data node_list : list of continuous nodes bins : number of intervals with equal width discretization_type : takes one of the following values - uniform : generates bins of equal width frequency : generates bins of equal frequency K-means : generates bins using kmeans algorithm Returns: dataframe with discretized columns appended in df """ discretizer = None if discretization_type == 'uniform': discretizer = KBinsDiscretizer(n_bins=bins, encode='ordinal', strategy='uniform') elif discretization_type == 'quantile': discretizer = KBinsDiscretizer(n_bins=bins, encode='ordinal', strategy='quantile') elif discretization_type == 'kmeans': discretizer = KBinsDiscretizer(n_bins=bins, encode='ordinal', strategy='kmeans') else: raise NotImplementedError("Invalid discretization type") df[node_list] = discretizer.fit_transform(df[node_list]) return df def get_laim(sp, scheme, xi, y): """ LAIM score for discretization Args: sp : indexes of x corresponding to each bin scheme : set of thresholds for the discretized bins xi : attribute being discretized y : target to be used for discretization of xi Returns: LAIM score """ sp.insert(0, 0) sp.append(xi.shape[0]) n = len(sp) - 1 M = 0 laim = 0 for r in range(n): init = sp[r] fin = sp[r + 1] val, counts = np.unique(y[init:fin], return_counts=True) if val[0] == -1e10: val = val[1:] counts = counts[1:] Mr = counts.sum() maxr = counts.max() laim += (maxr / Mr) * maxr M += Mr laim /= n * M return laim def get_ameva(sp, scheme, xi, y): """ Ameva score for discretization Args: sp : indexes of x corresponding to each bin scheme : set of thresholds for the discretized bins xi : attribute being discretized y : target to be used for discretization of xi Returns: Ameva score """ sp.insert(0, 0) sp.append(xi.shape[0]) n = len(sp) - 1 label, label_counts = np.unique(y, return_counts=True) M_label = dict() for j in range(len(label)): M_label[label[j]] = label_counts[j] M = 0 ameva = 0 for r in range(n): init = sp[r] fin = sp[r + 1] val, counts = np.unique(y[init:fin], return_counts=True) Mr = counts.sum() for j in range(len(val)): ameva += (counts[j] / Mr) * (counts[j] / M_label[val[j]]) M += Mr ameva = (M * (ameva - 1)) / (n * (len(label) - 1)) return ameva def get_mlameva(sp, scheme, xi, y): """ Multi Label Ameva score for discretization Args: sp : indexes of x corresponding to each bin scheme : set of thresholds for the discretized bins xi : attribute being discretized y : target to be used for discretization of xi Returns: MLAmeva score """ sp.insert(0, 0) sp.append(xi.shape[0]) n = len(sp) - 1 label, label_counts = np.unique(y, return_counts=True) if label[0] == -1e10: label = label[1:] label_counts = label_counts[1:] M_label = dict() for j in range(len(label)): M_label[label[j]] = label_counts[j] M = 0 mlameva = 0 for r in range(n): init = sp[r] fin = sp[r + 1] val, counts = np.unique(y[init:fin], return_counts=True) if val[0] == -1e10: val = val[1:] counts = counts[1:] Mr = counts.sum() for j in range(len(val)): mlameva += (counts[j] / Mr) * (counts[j] / M_label[val[j]]) M += Mr mlameva = (M * (mlameva - 1)) / (n * (len(label) - 1)) return mlameva def parallel(args): if FCIT(args[0], args[3], args[4], args[5], nodes=args[1], onehot_dict=args[2]) > args[6]: return args[4] return -1 class Data_Driven_Discretizer: """ Used to discretize a set of variables using the inter-dependence information available before hand. 
Args: data : data to be discretized skel : the inter-dependence knowledge available before-hand nodes : a dict containing extra information about the variables max_process : max no of processes to create during parallelization method : core discretization technique to be used Returns: discretized data """ def __init__(self, data, skel, cond_check_skel, nodes, alpha=0.1, max_process=10, discretizer=CAIMD, method=get_mlameva): self.data = data self.alpha = alpha self.max_process = max_process self.skel = skel self.cond_check_skel = cond_check_skel self.nodes = nodes self.disc_data = data.copy() self.cont_list = [node[0] for node in self.nodes if node[1]['type'] == 'cont'] self.disc_list = [node[0] for node in self.nodes if node[1]['type'] == 'disc'] self.n_samples = self.data.shape[0] self.onehot_dict = {node[0]: one_hot_encoder(data[:][node[0]].to_numpy()) for node in self.nodes if node[1]['type'] == 'disc'} self.discretizer = discretizer(score=method, max_process=max_process) def cond_check(self, node, neigh, neighbors, scheme): """Returns the no of nodes that have either changed to independent or dependent on "node" due to discretization""" pool = Pool(self.max_process) PCS = set(self.cont_list + self.disc_list)-set([node]) data_disc = self.data.copy() data_disc[node] = scheme.transform(self.data[[node, neigh]]) data_disc = data_disc.replace({node: pd.unique(data_disc[node])}, {node: list(range(pd.unique(data_disc[node]).shape[0]))}) self.onehot_dict[node] = one_hot_encoder(data_disc[node].to_numpy()) args = [(self.data,self.nodes,self.onehot_dict,node, neigh, [],self.alpha) for neigh in PCS] PCS = PCS - set(pool.map(parallel, args)) args = [(self.data,self.nodes,self.onehot_dict,node, X, [Z],self.alpha) for X in PCS for Z in PCS-set([X])] new_neighbors = PCS-set(pool.map(parallel, args)) pool.close() pool.join() return len(new_neighbors - set(neighbors)) + len(set(neighbors) - new_neighbors) def discretize(self): """Entry point into the algorithm""" cont_queue = [] for node in self.cont_list: if len(list(self.skel.neighbors(node))) != 0: ratio = len(set(self.skel.neighbors(node))-set(self.cont_list))/len(list(self.skel.neighbors(node))) else: ratio = 0 cont_queue.append((ratio, len(list(self.skel.neighbors(node))), node)) cont_queue = sorted(cont_queue, key=lambda x: (-x[0], -x[1])) while cont_queue: (ratio, _, node) = cont_queue.pop(0) best_score = -1 best_scheme = None main_list = [] if ratio == 0: iter_set = set(self.disc_list) else: iter_set = set(self.skel.neighbors(node))-set(self.cont_list) for neigh in iter_set: scheme, score = self.discretizer.fit(self.data[[node, neigh]], node, neigh) main_list.append((self.cond_check(node, neigh, list(self.cond_check_skel[node]), scheme), score, scheme, neigh)) (best_shd, best_score, best_scheme, best_neigh) = sorted(main_list, key=lambda i: (i[0], -i[1]))[0] self.data[node] = best_scheme.transform(self.data[[node, best_neigh]]) self.data = self.data.replace({node: pd.unique(self.data[node])}, {node: list(range(pd.unique(self.data[node]).shape[0]))}) self.cont_list.remove(node) self.disc_list.append(node) for i in range(len(cont_queue)): if cont_queue[i][2] in set(self.skel.neighbors(node)): cont_queue[i] = (cont_queue[i][0]+1/len(list(self.skel.neighbors(cont_queue[i][2]))), cont_queue[i][1], cont_queue[i][2]) cont_queue = sorted(cont_queue, key=lambda x: (-x[0], -x[1])) return self.data def PCA_discretizer(data, skel, PCS_neigh, nodes, alpha, max_process, threshold = 90): cont_nodes = [node[0] for node in nodes if node[1]['type'] == 'cont'] 
data_cont = data.loc[:, cont_nodes].values data_transformed = StandardScaler().fit_transform(data_cont)
[ "sarthak.chakraborty@gmail.com" ]
sarthak.chakraborty@gmail.com
d2757e0e2e810e0a983f7fd5a8610b771d1ad224
cbb38d66021dcac8277ff34a67f3cf4c7d25d780
/python - 核心编程/第一章 python核心/第十二章 面向对象编程/元类(Metaclass)和__metaclass__.py
d17c8f7d75c518bb4194e1644a04454cee9a955d
[]
no_license
heqiang/PythonCore
64cb0e98fdd01148b1ff3892197f92fcd24cf6be
ff29c5746c86cf0c7853c9ffddfab44731a651c0
refs/heads/master
2022-12-31T06:07:09.609739
2020-10-12T09:01:48
2020-10-12T09:01:48
270,339,685
0
0
null
null
null
null
UTF-8
Python
false
false
2,493
py
from time import ctime from warnings import warn # The main purpose of a metaclass is to change a class automatically when the class is created. # Intercept the class creation # Modify the class # Return the modified class # About type # 1. Inspect the type of an object # 2. Create a class dynamically: type(class name, tuple of parent class names (for inheritance; may be empty), dict of attributes (names and values)) class FatBoy(object): def __init__(self): self.name="hq" # print("Boy") class FatBoss(FatBoy): ceshi="FatBoss" # print('Boss') boy=FatBoy() boy.age=15 print(boy.__dict__) # Use type to create a class object Test1=type("Test",(),{"ceshi":3}) # The attribute added here is a class attribute, not an instance attribute Test1.foo=2 # Create a subclass of Test testSon=type("testSon",(Test1,),{}) # print(testSon.ceshi) # print(testSon.__mro__) # Use type to create a class with a method @classmethod def sell(self): pass # print(self.ceshi) FatGirl=type("fatGirl",(FatBoss,),{"sell":sell}) # print(hasattr(FatGirl, 'ceshi')) # FatGirl.sell() FatGirl.sell() # print(sell.__class__.__class__) ''' Use a function to define a metaclass ''' def upper_attr(class_name,class_parents,class_attr): ''' :param class_name: the class name :param class_parents: the parent classes :param class_attr: class attributes stored as a dict :return: a class ''' new_attr={} for name,value in class_attr.items(): print("name =%s and value = %s" % (name,value)) if not name.startswith("__"): new_attr[name.upper()]=value print("uppercased attribute name: %s" %(name.upper())) print("value: %s"%(value)) # Create a class with type return type(class_name,class_parents,new_attr) class Foo(object,metaclass=upper_attr): bar=30 f=Foo() # print(hasattr(Foo, 'bar')) # print(hasattr(Foo, 'BAR')) # print(f.BAR) ''' Use a class to define a metaclass ''' class UpperAttrMetaClass(type): def __new__(cls,class_name, class_parents,class_atts): new_attr={} for name,value in class_atts.items(): if not name.startswith("__"): new_attr[name.upper()]=value def sell(self): print("sell") new_attr["sell"]=sell return type(class_name,class_parents,new_attr) class Zoo(object,metaclass=UpperAttrMetaClass): bar="bip" z=Zoo() print(hasattr(Zoo, 'bar')) print(hasattr(Zoo, 'BAR')) print(z.BAR) z.sell()
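# A minimal equivalence sketch of what the examples above demonstrate (names are illustrative):
# Point = type("Point", (object,), {"x": 0}) builds the same class object as
# "class Point(object): x = 0" -- and both Point classes are themselves instances of type.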
[ "1422127065@qq.com" ]
1422127065@qq.com
afed7ea6d061ea4ffba6ac412727824fddf76201
85aee72efca0bfa64ae14b6966e85e24880f2a0d
/setup.py
ed620d174c6f01e6dfca6cd0bbdfba8ffedbcfec
[]
no_license
Kirilliann/binospec
be285f1d9398074d5aa31dcf8db9325d10be0f25
46808832a40b8570207597ac05972331a8873ae3
refs/heads/master
2020-08-23T00:32:27.591238
2019-12-21T07:38:42
2019-12-21T07:38:42
216,507,192
0
0
null
null
null
null
UTF-8
Python
false
false
960
py
from setuptools import setup, find_packages with open('README.md') as fh: long_description = fh.read() setup( name='bino', version='0.0.1', author='Kirill Grishin', author_email='grishin@voxastro.org', description='Zeropoint calculation for MMIRS and Binospec data', long_description=long_description, long_description_content_type='text/markdown', url='http://sai.msu.ru', license='MIT', packages=find_packages(), test_suite='test', install_requires=['numpy>=1.17.4', 'scipy>=1.3.2', 'matplotlib>=3.0.3', 'pyvo>=1.0', 'argparse>=1.1', 'lmfit>=0.9.15', 'astropy>=3.2.3'], classifiers=[ 'Intended Audience :: Science/Research', 'Intended Audience :: Education', 'License :: OSI Approved :: MIT License', 'Topic :: Education', 'Programming Language :: Python :: 3', ], keywords='astrophysics instrumentation', )
[ "grischin.ka15@physics.msu.ru" ]
grischin.ka15@physics.msu.ru
b97c353d15577f892f4c7527f443931cd60f9aa7
80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019
/data/HackerRank-Regex/Positive Lookbehind.py
23e3268d2cc962de0f7fb667cc7ce7748300acd8
[]
no_license
Ritvik19/CodeBook
ef7764d89b790e902ede5802f36d5ca910d8a50e
2b4ed7938bbf156553d6ba5cba6216449528f0fc
refs/heads/master
2021-07-04T08:25:52.478719
2020-08-08T06:54:14
2020-08-08T06:54:14
138,744,302
3
0
null
null
null
null
UTF-8
Python
false
false
180
py
import re Regex_Pattern = r"(?<=[13579])\d" # Do not delete 'r'. Test_String = input() match = re.findall(Regex_Pattern, Test_String) print("Number of matches :", len(match))
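# Worked example (an assumption, not part of the original exercise): for the input "123456",
# the lookbehind keeps each digit that follows an odd digit, so re.findall returns
# ['2', '4', '6'] and the script prints "Number of matches : 3".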
[ "rastogiritvik99@gmail.com" ]
rastogiritvik99@gmail.com
b364c2ac4cc2d109f0d1e32eb825597312c92230
052faf8843a3e26d9fab5da737dfd02cb43f5592
/Pacote_Python/Ex_python_CEV/exe034.py
75ef339f8eb1f06795b7fcd0092e4b99b1539012
[]
no_license
Cica013/Exercicios-Python-CursoEmVideo
5c65ca3a8423f338b51ac82c6eab44ec5d2a2e74
d2c711bccae34e6016937e086a9f2d9126a860af
refs/heads/main
2023-04-06T01:46:31.107902
2021-04-22T00:02:53
2021-04-22T00:02:53
360,339,119
0
0
null
null
null
null
UTF-8
Python
false
false
467
py
# Write a program that asks for an employee's salary and calculates the raise. # For salaries above 1,250.00, the raise is 10%. For salaries at or below that, the raise is 15%. salario = float(input("Enter the employee's salary: ")) if salario > 1250: aumento = (salario * 10) / 100 else: aumento = (salario * 15) / 100 n_salario = salario + aumento print(f"The employee's salary will be {n_salario} ")
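# Worked example (assumption): an input of 2000.00 gets the 10% raise (200.00) for a new
# salary of 2200.00, while 1000.00 gets the 15% raise (150.00) for a new salary of 1150.00.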
[ "61808853+Cica013@users.noreply.github.com" ]
61808853+Cica013@users.noreply.github.com
e6a70c92d641c6ad953c251e533973d27eefbd52
8002e0b1b625c702ba908338721c04432a97ab04
/manage.py
d245bc3ff4a9c412bd18ad817a3278213ea823b6
[]
no_license
IKermani/OPDC-pipeline-flow-DSS
5024920a82d8245fe87b3d8b0dbe960f907a8973
04fbaf20e10d3d3d4738eed77e027225d8bf254a
refs/heads/master
2023-07-02T14:35:29.721947
2021-08-09T16:13:48
2021-08-09T16:13:48
393,700,689
2
0
null
null
null
null
UTF-8
Python
false
false
659
py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DSS.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[ "imankermani@hotmail.com" ]
imankermani@hotmail.com
dd63fdead6db72d7ca934174cabe1223906ffcd3
7da40408c4eb36c7394d7250e939f5be984036bb
/Sandy/bst.py
f3bf61767b00f09de90ed1c0340cbf6b242c1d51
[ "MIT" ]
permissive
sandeepm96/cormen-algos
069a74539878462bb69efd8674f9c9049f4a5e43
9154f6ce9cb0c318bc0d6ecaa13676d080985cec
refs/heads/master
2021-09-15T16:01:58.156177
2018-06-06T05:42:10
2018-06-06T05:42:10
103,383,140
1
0
null
null
null
null
UTF-8
Python
false
false
2,535
py
class node: def __init__(self, key=None): self.parent = None self.key = key self.left = None self.right = None class BSTree: def __init__(self): self.root = node() def insert(self, key): if self.root.key is None: self.root.key = key else: item = node(key) x = self.root while x is not None: y = x if x.key > key: x = x.left else: x = x.right item.parent = y if y.key > key: y.left = item else: y.right = item def search(self, key): x = self.root while x is not None and x.key!=key: if x.key > key: x = x.left elif x.key < key: x = x.right return x def inorder(self, root): if root is not None: self.inorder(root.left) print root.key, self.inorder(root.right) def min(self, root): while root.left is not None: root = root.left return root def max(self, root): while root.right is not None: root = root.right return root def delete(self, key): x = self.root while x is not None and x.key!=key: if x.key > key: x = x.left elif x.key < key: x = x.right if x is not None: y = x.parent if x.left is None and x.right is None: temp = None elif x.left is None: temp = x.right temp.parent = y elif x.right is None: temp = x.left temp.parent = y else: temp = self.min(x.right) if temp.parent.left == temp: temp.parent.left = None else: temp.parent.right = None temp.parent = y temp.left = x.left temp.right = x.right if y is not None: if y.left == x: y.left = temp else: y.right = temp else: if temp is not None: self.root = temp else: self.root = node() else: raise Exception('Node doesn\'t exist for key value %s' % key)
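# Minimal usage sketch (an assumption; Python 2, to match the print statements above):
# t = BSTree()
# for k in [8, 3, 10, 1, 6]: t.insert(k)
# t.inorder(t.root)   # prints: 1 3 6 8 10
# t.delete(3)
# t.inorder(t.root)   # prints: 1 6 8 10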
[ "sandeep.mahapatra.5@gmail.com" ]
sandeep.mahapatra.5@gmail.com
60394e55fe2e034e42773605b73addddc5a8d395
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/zJSF5EfPe69e9sJAc_24.py
e9113672790b3572ce048bef538b6d686bc24d89
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
651
py
""" Create a function that takes a string `txt` and censors any word from a given list `lst`. The text removed must be replaced by the given character `char`. ### Examples censor_string("Today is a Wednesday!", ["Today", "a"], "-") ➞ "----- is - Wednesday!" censor_string("The cow jumped over the moon.", ["cow", "over"], "*"), "The *** jumped **** the moon.") censor_string("Why did the chicken cross the road?", ["Did", "chicken", "road"], "*") ➞ "Why *** the ******* cross the ****?" ### Notes N/A """ def censor_string(txt, lst, char): for word in lst : txt=txt.replace(word,char*len(word)) return txt
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
46cf2b151bf321158b142b2a57db9a0be4c9c245
9b2ce98e1fe8ce6b96bfb22cb5a37589301769c8
/Stack/stack implementation using list.py
7dba05c35cfbf3050c393c50b3f38d2b4d6a08e6
[]
no_license
hiranmayee1123/-50-days-data-structures
7126971cfca165361bfec11bb2dbb68b2efe96b9
69a009f9df56f9467b065e38f2355bc975e78fb7
refs/heads/main
2023-03-22T22:04:59.592350
2021-03-09T16:10:27
2021-03-09T16:10:27
332,808,918
5
1
null
null
null
null
UTF-8
Python
false
false
492
py
stack = [] # append() function to push # element in the stack stack.append('a') stack.append('b') stack.append('c') print('Initial stack') print(stack) # pop() function to pop # element from stack in # LIFO order print('\nElements popped from stack:') print(stack.pop()) print(stack.pop()) print(stack.pop()) print('\nStack after elements are popped:') print(stack) # uncommenting print(stack.pop()) # will cause an IndexError # as the stack is now empty
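# Related idioms (an assumption, not part of the original snippet): stack[-1] peeks at the
# top element without popping it, and an "if stack:" guard avoids the IndexError noted above.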
[ "noreply@github.com" ]
hiranmayee1123.noreply@github.com
5e6042cdbeb41a05fb3ed2bae691d2131ec9ba98
d79b228cd97f92db1ae6f4efbf554557298a0669
/main.py
96921d7dd3f6c9f9b6ca1d347d0c5c2a16a3d985
[]
no_license
pulkit-ahujaa/hacker-rank-python-certification-sample-problem
7866838def39ba895137446a4889eaac88f8d6d5
d26f9eaca41b00abb84cfc91523c02fbe4cf392c
refs/heads/master
2022-10-21T13:43:28.628555
2020-06-18T07:15:25
2020-06-18T07:15:25
273,167,795
0
0
null
null
null
null
UTF-8
Python
false
false
196
py
# n was undefined in the original snippet; reading it from stdin is an assumption n = int(input()) for x in range(n): output = "" if(x % 3 == 0): output += 'Fizz' if(x % 5 == 0): output += 'Buzz' if(output == ""): output += str(x) print(output)
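# Expected output sketch (assumption): with n = 6 the loop prints
# FizzBuzz, 1, 2, Fizz, 4, Buzz -- 0 is divisible by both 3 and 5, so it prints FizzBuzz.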
[ "noreply@github.com" ]
pulkit-ahujaa.noreply@github.com
4db2679dfea9ba6124b4c4d0e1f4a0644ee11321
653bf020fabf012bca253cb4d4ca3fb80c7068ab
/Cart1/Cart/Cart/views.py
053b9c11d8b9367d672c0b10cb7f59add82ad6cc
[]
no_license
ravi35767/Cart
f49c61686639f08986d8017420929e758b077d72
8b71d2c29e9dc7f5beb0a6c0455f376d4206630c
refs/heads/master
2020-12-28T11:04:00.156422
2020-02-04T20:59:03
2020-02-04T20:59:03
238,304,867
0
0
null
null
null
null
UTF-8
Python
false
false
106
py
from django.shortcuts import render def index(request): return render(request, 'Cart/cart.html')
[ "noreply@github.com" ]
ravi35767.noreply@github.com
ee895dbe07158ef23f219efc0a92b0c25a5e6c88
ac17bba2af76a55b4888c5804940aa9a219b6f8e
/JokeRecommendationNeurIPS.py
08a5a9395cdc5a331bd67ccfdc6de9024a76a344
[]
no_license
prasanna5raut/SRF_NeurIPS2020
711b4d564cfeeba9b5759c8a3f6e0a28668aac65
9ed089223cdb6995fa1974e7997e57c52d4a2de1
refs/heads/main
2023-01-05T19:25:13.362327
2020-10-22T19:53:48
2020-10-22T19:53:48
306,428,450
2
0
null
null
null
null
UTF-8
Python
false
false
12,397
py
import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import minimize import math ################################ DEFINE Functions and classes ################################ def OLFW(T, eps): """ Online Lagragian Franke-Wolf implementation input: T (Horizon), eps (confidence) output: sequence of x_t """ # Number of times each oracle is called K = np.int(T**0.5) # Step size of Online Gradient Ascent mu = 1.0 / K/ beta # delta delta = beta**2 # Initial point for the OGA oracles v0 = np.zeros(d) # Initialize K instances of OGA oracle and store in a list listOracle = [OnlineGA(v0) for i in range(K)] # Initialize the current estimate of p p_hat = pEstimate() # Initialize gamma_t's gammas = np.zeros(T) # for t in range(1,T): # gammas[t] = (2*G**2*np.log(2.0*T/eps)/t)**(0.5) # gammas[t] = 0 # Initialize the lambda_t's lambdas = np.zeros(T) # Store the decisions to return as output XX=np.zeros((d,T)) for t in range(T): # Online GA step X_temp = np.zeros((d,K+1)) for k in range(K): X_temp[:,k+1] = X_temp[:,k] + (1.0/K)*listOracle[k].v XX[:,t] = roundVec(X_temp[:,K]) # Update dual variables lambdas[t] = (1/(delta*mu))*max(0,np.dot(p_hat.vec, XX[:,t]) - B - gammas[t]) # Feedback the data to Online GA sub-routine for k in range(K): listOracle[k].update(mu, gradFJoke(t, X_temp[:,k]) - lambdas[t]*p_hat.vec) # Update the current estimate after nature reveals the data p_hat.update(t+1, P[t,:]) return XX def fM(t): """ This returns Mt matrix in the definition of Jester objective (f_t(x) = R_t^Tx + x^TM_tx) """ M = np.zeros((d,d)) for i in range(nBlocks): theta_min = np.amin(H[t, i*w:(i+1)*w]) theta = theta_min/(w**2-w) M[i*(w):(i+1)*w, i*w:(i+1)*w] = theta*(np.eye(w) - np.ones((w,w))) return M def fJoke(t, x): """ This is the objective function oracle. input: time t and decision vector x (and function rollout history H) output: f_t(x_t) """ M = fM(t) out=np.dot(x.T,np.dot(M,x)) + np.dot(x.T, H[t,:]) return out def gradFJoke(t, x): """ This is the objective function's gradient oracle. input: time t and decision vector x (and function rollout history H) output: grad(f_t(x_t)) """ out=0 M = fM(t) # print(Ht.shape) # print(u.shape) out=H[t,:] + np.dot(M+M.T,x) return out def g(t, x): """ This is the constraint function oracle. input: time t and decision vector x (and function rollout history P) output: g_t(x_t) = <p_t, x_t> - B """ return np.dot(P[t,:], x) def roundVec(X): """ Rounds the top KB entries to one """ ind = np.argpartition(X, -KB)[-KB:] out = np.zeros(d) np.put(out,ind,np.ones(ind.size)) return out class pEstimate: """ This class stores the current estimate of p in the field named 'vec' """ def __init__(self): self.vec = np.zeros(d) def update(self, t, p_t): """ Updates the current estimate by taking as input the current realization. """ self.vec = (t-1.0)/t*self.vec + p_t/t class OnlineGA: """ This is the class for defining Online Gradient Ascent Oracle. The output of the oracle is stored as field named 'v' """ def __init__(self, v0): """ Set the current output of oracle to be v0""" self.v = v0 def update(self, mu, direction): """ Updates the output by moving 'mu' units along 'direction'. 
""" self.v = self.v + mu*direction # Projection step 1 for s in range(d): if self.v[s] < 0: self.v[s] = 0 elif self.v[s] > 1: self.v[s] = 1 # Projection step 2 val = np.dot(np.ones(d),self.v) if val>KB: self.v = self.v -(val-KB)/d*np.ones(d) def BestOfBoth(T, eps): """ Best-of-both worlds algorithm input: T (Horizon), eps (parameter in W = T^{1 - eps}) output: sequence of x_t """ V = np.int(T**(1 - eps/4)) K = np.int(T**(1/2)) alpha = V*(T**0.5) # Initialize the meta-FW vectors (v_{t - 1}'s) and (v_t's) v_prev = np.zeros((d, K)) v_curr = np.zeros((d, K)) # Initialize the dual variables lambdas = np.zeros(K) # partial decision vectors x_t(k) X_partial_prev = np.zeros((d, K + 1)) X_partial_curr = np.zeros((d, K + 1)) # Store the decisions to return XX = np.zeros((d, T)) for t in range(1, T): # Primal update X_partial_curr[:, 0] = np.zeros(d) for k in range(K): new_position = v_prev[:, k] + (1/2/alpha)*(V*gradFJoke(t - 1, X_partial_prev[:, k]) - lambdas[k]*gradG(t - 1, v_prev[:, k])) # Perform projection back to ground constraint set # Projection step 1 for s in range(d): if new_position[s] < 0: new_position[s] = 0 elif new_position[s] > 1: new_position[s] = 1 # Projection step 2 val = np.dot(np.ones(d), new_position) if val > KB: new_position = new_position -(val-KB)/d*np.ones(d) v_curr[:, k] = new_position X_partial_curr[:, k + 1] = X_partial_curr[:, k] + (1/K)*v_curr[:, k] XX[:, t] = roundVec(X_partial_curr[:, K]) # Dual update for k in range(K): lambdas[k] = max(0, lambdas[k] + g(t - 1, v_prev[:, k]) - B + np.dot(gradG(t - 1, v_prev[:, k]), v_curr[:, k] - v_prev[:, k])) # Set current values to be the previous ones X_partial_prev = X_partial_curr v_prev = v_curr return XX def fM(t): """ This returns Mt matrix in the definition of Jester objective (f_t(x) = R_t^Tx + x^TM_tx) """ M = np.zeros((d,d)) for i in range(nBlocks): theta_min = np.amin(H[t, i*w:(i+1)*w]) theta = theta_min/(w**2-w) M[i*(w):(i+1)*w, i*w:(i+1)*w] = theta*(np.eye(w) - np.ones((w,w))) return M def fJoke(t, x): """ This is the objective function oracle. input: time t and decision vector x (and function rollout history H) output: f_t(x_t) """ M = fM(t) out=np.dot(x.T,np.dot(M,x)) + np.dot(x.T, H[t,:]) return out def gradFJoke(t, x): """ This is the objective function's gradient oracle. input: time t and decision vector x (and function rollout history H) output: grad(f_t(x_t)) """ out=0 M = fM(t) # print(Ht.shape) # print(u.shape) out=H[t,:] + np.dot(M+M.T,x) return out def g(t, x): """ This is the constraint function oracle. input: time t and decision vector x (and function rollout history P) output: g_t(x_t) = <p_t, x_t> - B """ return np.dot(P[t,:], x) def gradG(t, x): """ This is the constraint function's gradient oracle. 
input: time t and decision vector x output: grad(g_t(x)) """ return P[t, :] def roundVec(X): """ Rounds the top KB entries to one """ ind = np.argpartition(X, -KB)[-KB:] out = np.zeros(d) np.put(out,ind,np.ones(ind.size)) return out def BestOfBothOneGrad(T, V, alpha, K): """ Best-of-both worlds algorithm with one gradient evaluation per step input: T (Horizon), V (relative-weights parameter), alpha (step-size), K (number of ) output: sequence of x_t """ Q = np.int(T/K) # Initialize the meta-FW vectors (v_{q - 1}'s) and (v_q's) v_prev = np.zeros((d, K)) v_curr = np.zeros((d, K)) # Initialize the dual variables lambdas = np.zeros(K) # partial decision vectors x_t(k) X_partial_prev = np.zeros((d, Q, K + 1)) X_partial_curr = np.zeros((d, Q, K + 1)) # Time permutation prev_t = np.array(range(K)) # Store the decisions to return XX = np.zeros((d, T)) for q in range(1, Q): curr_t = np.random.permutation(range((q - 1)*K, q*K)) X_partial_curr[:, q, 0] = np.zeros(d) gradGbar_q_minus_one_matrix = np.zeros((d, K)) for k in range(K): # Evaluate gradG_bar_{q-1}(v_{q-1}^{k}) gradGbar_q_minus_one = np.zeros(d) for i in range(K): gradGbar_q_minus_one += gradG(prev_t[i], v_prev[:, k]) gradGbar_q_minus_one = 1/K*gradGbar_q_minus_one gradGbar_q_minus_one_matrix[:, k] = gradGbar_q_minus_one new_position = v_prev[:, k] + (1/2/alpha)*(V*gradFJoke(prev_t[k], X_partial_prev[:, q - 1, k]) - lambdas[k]*gradGbar_q_minus_one) # Perform projection back to ground constraint set # Projection step 1 for s in range(d): if new_position[s] < 0: new_position[s] = 0 elif new_position[s] > 1: new_position[s] = 1 # Projection step 2 val = np.dot(np.ones(d), new_position) if val > KB: new_position = new_position -(val-KB)/d*np.ones(d) v_curr[:, k] = new_position X_partial_curr[:, q, k + 1] = X_partial_curr[:, q, k] + 1/K*v_curr[:, k] for t in curr_t: XX[:, t] = roundVec(X_partial_curr[:, q, K]) # Dual update for k in range(K): # Evaluate g_bar_{q-1}(v_{q-1}^{k}) gbar_q_minus_one = 0 for i in range(K): gbar_q_minus_one += g(prev_t[i], v_prev[:, k]) - B gbar_q_minus_one = 1/K*gbar_q_minus_one lambdas[k] = max(0, lambdas[k] + gbar_q_minus_one + np.dot(gradGbar_q_minus_one_matrix[:, k], v_curr[:, k] - v_prev[:, k])) # Set current values to be the previous ones X_partial_prev = X_partial_curr v_prev = v_curr prev_t = curr_t # Return the history return XX def OSPHG(T, W): """ OSPHG implementation input: T (Horizon), W (Window size) output: sequence of x_t """ # Number of times each oracle is called K = np.int(T**0.5) # Step size of Online Gradient Ascent mu = 1.0 / (W*T)**0.5 # delta delta = beta**2 # Initial point for the OGA oracles v0 = np.zeros(d) # Initialize K instances of OGA oracle and store in a list listOracle = [OnlineGA(v0) for i in range(K)] # Initialize the lambda_t's lambdas = np.zeros(T) # Store the decisions to return as output XX=np.zeros((d,T)) for t in range(T): # Online GA step X_temp = np.zeros((d,K+1)) for k in range(K): X_temp[:,k+1] = X_temp[:,k] + (1.0/K)*listOracle[k].v XX[:,t] = roundVec(X_temp[:,K]) # Feedback the data to Online GA sub-routine for k in range(K): listOracle[k].update(mu, gradFJoke(t, X_temp[:,k]) - lambdas[t]*P[t, :]) # Update dual variables lambdas[t] = max(0,(1 - delta*mu**2)*lambdas[t] + mu*(np.dot(P[t, :], XX[:, t]) - B)) return XX # Trade-off experiments for different Window sizes eps_vec = [0.05, 0.25, 0.5, 0.75, 1] num = 5 final_utilityOSPHG = np.zeros(num) final_utilityBOB = np.zeros(num) final_budgetViolationOSPHG = np.zeros(num) final_budgetViolationBOB = np.zeros(num) for i in 
range(num): print(i) eps = eps_vec[i] W = np.int(T**(1 - eps)) # Run the algorithms X_historyOSPHG = OSPHG(T, W) X_historyBOB = BestOfBoth(T, eps) # Calculate the utility and budget violation # For OSPHG f_historyOSPHG = fJoke(0, X_historyOSPHG[:, 0]) for t in range(1, T): f_historyOSPHG = f_historyOSPHG + fJoke(t, X_historyOSPHG[:, t]) final_utilityOSPHG[i] = f_historyOSPHG g_historyOSPHG = g(0, X_historyOSPHG[:, 0]) for t in range(1, T): g_historyOSPHG = g_historyOSPHG + np.dot(P[t, :], X_historyOSPHG[:, t]) - B final_budgetViolationOSPHG[i] = g_historyOSPHG # For Best-of-Both f_historyBOB = fJoke(0, X_historyBOB[:, 0]) for t in range(1, T): f_historyBOB = f_historyBOB + fJoke(t, X_historyBOB[:, t]) final_utilityBOB[i] = f_historyBOB g_historyBOB = g(0, X_historyBOB[:, 0]) for t in range(1, T): g_historyBOB = g_historyBOB + np.dot(P[t, :], X_historyBOB[:, t]) - B final_budgetViolationBOB[i] = g_historyBOB # We plot final_utilityOSPHG and final_utilityBOB on one subplot, and final_budgetViolationOSPHG and final_budgetViolationBOB on the other in Figure 1 (a) of the paper.
[ "noreply@github.com" ]
prasanna5raut.noreply@github.com
b54f9abb95e8aaaa4b220891479482a498011e59
711761b1b83677dd8d6039e9c1f20bce075fa1ab
/foodie/blog/migrations/0001_initial.py
bedfd15c9ad0dbb522566e7e5d3786c0a7027689
[]
no_license
LocalDevv/django-blog
0e531cf39d4974055ab3d00bdefc55d9e0ce3f48
49543a409731fe00850e8027938a8f24a926f1e0
refs/heads/master
2022-02-01T08:10:55.170705
2019-06-01T11:34:36
2019-06-01T11:34:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,395
py
# Generated by Django 2.2.1 on 2019-05-29 15:03 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=25)), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=50, unique=True)), ('featured_image', models.ImageField(blank=True, null=True, upload_to='featured-images/')), ('body', models.TextField()), ('published', models.DateTimeField(auto_now_add=True)), ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category')), ], ), ]
[ "45708379+officialoghene@users.noreply.github.com" ]
45708379+officialoghene@users.noreply.github.com
82ba4b4de155c3ade2f1e62045d2a96ee50d82eb
3f13f6ffa4f98b4cfbc9a64bdc4b452e16ad6098
/Xception.py
9f5155159a3bc4c510790066ac76beec460fec44
[]
no_license
c964309085/keras_applications
298a67d6e6d60a5a08b209def771ebe709c5fe93
a76e9d33707ea7091f251dffeb5be508b868ca3d
refs/heads/master
2022-12-17T08:00:03.222950
2020-09-28T22:21:12
2020-09-28T22:21:12
299,357,090
2
0
null
null
null
null
UTF-8
Python
false
false
13,412
py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """Xception V1 model for Keras. On ImageNet, this model gets to a top-1 validation accuracy of 0.790 and a top-5 validation accuracy of 0.945. Reference: - [Xception: Deep Learning with Depthwise Separable Convolutions]( https://arxiv.org/abs/1610.02357) (CVPR 2017) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras import backend from tensorflow.python.keras.applications import imagenet_utils from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import VersionAwareLayers from tensorflow.python.keras.utils import data_utils from tensorflow.python.keras.utils import layer_utils from tensorflow.python.lib.io import file_io from tensorflow.python.util.tf_export import keras_export TF_WEIGHTS_PATH = ( 'https://storage.googleapis.com/tensorflow/keras-applications/' 'xception/xception_weights_tf_dim_ordering_tf_kernels.h5') TF_WEIGHTS_PATH_NO_TOP = ( 'https://storage.googleapis.com/tensorflow/keras-applications/' 'xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5') layers = VersionAwareLayers() @keras_export('keras.applications.xception.Xception', 'keras.applications.Xception') def Xception( include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax'): """Instantiates the Xception architecture. Reference: - [Xception: Deep Learning with Depthwise Separable Convolutions]( https://arxiv.org/abs/1610.02357) (CVPR 2017) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Note that the default input image size for this model is 299x299. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.xception.preprocess_input` for an example. Arguments: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(299, 299, 3)`. It should have exactly 3 inputs channels, and width and height should be no smaller than 71. E.g. `(150, 150, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. 
- `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the "top" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the "top" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer. """ if not (weights in {'imagenet', None} or file_io.file_exists(weights)): raise ValueError('The `weights` argument should be either ' '`None` (random initialization), `imagenet` ' '(pre-training on ImageNet), ' 'or the path to the weights file to be loaded.') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as `"imagenet"` with `include_top`' ' as true, `classes` should be 1000') # Determine proper input shape input_shape = imagenet_utils.obtain_input_shape( input_shape, default_size=299, min_size=71, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) if input_tensor is None: img_input = layers.Input(shape=input_shape) else: if not backend.is_keras_tensor(input_tensor): img_input = layers.Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 x = layers.Conv2D( 32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input) x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x) x = layers.Activation('relu', name='block1_conv1_act')(x) x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x) x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x) x = layers.Activation('relu', name='block1_conv2_act')(x) residual = layers.Conv2D( 128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) residual = layers.BatchNormalization(axis=channel_axis)(residual) x = layers.SeparableConv2D( 128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x) x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x) x = layers.Activation('relu', name='block2_sepconv2_act')(x) x = layers.SeparableConv2D( 128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x) x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x) x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x) x = layers.add([x, residual]) residual = layers.Conv2D( 256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) residual = layers.BatchNormalization(axis=channel_axis)(residual) x = layers.Activation('relu', name='block3_sepconv1_act')(x) x = layers.SeparableConv2D( 256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x) x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x) x = layers.Activation('relu', name='block3_sepconv2_act')(x) x = layers.SeparableConv2D( 256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x) x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x) x = layers.MaxPooling2D((3, 3), strides=(2, 2), 
padding='same', name='block3_pool')(x) x = layers.add([x, residual]) residual = layers.Conv2D( 728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) residual = layers.BatchNormalization(axis=channel_axis)(residual) x = layers.Activation('relu', name='block4_sepconv1_act')(x) x = layers.SeparableConv2D( 728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x) x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x) x = layers.Activation('relu', name='block4_sepconv2_act')(x) x = layers.SeparableConv2D( 728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x) x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x) x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x) x = layers.add([x, residual]) for i in range(8): residual = x prefix = 'block' + str(i + 5) x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x) x = layers.SeparableConv2D( 728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x) x = layers.BatchNormalization( axis=channel_axis, name=prefix + '_sepconv1_bn')(x) x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x) x = layers.SeparableConv2D( 728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x) x = layers.BatchNormalization( axis=channel_axis, name=prefix + '_sepconv2_bn')(x) x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x) x = layers.SeparableConv2D( 728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x) x = layers.BatchNormalization( axis=channel_axis, name=prefix + '_sepconv3_bn')(x) x = layers.add([x, residual]) residual = layers.Conv2D( 1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x) residual = layers.BatchNormalization(axis=channel_axis)(residual) x = layers.Activation('relu', name='block13_sepconv1_act')(x) x = layers.SeparableConv2D( 728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x) x = layers.BatchNormalization( axis=channel_axis, name='block13_sepconv1_bn')(x) x = layers.Activation('relu', name='block13_sepconv2_act')(x) x = layers.SeparableConv2D( 1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x) x = layers.BatchNormalization( axis=channel_axis, name='block13_sepconv2_bn')(x) x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x) x = layers.add([x, residual]) x = layers.SeparableConv2D( 1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x) x = layers.BatchNormalization( axis=channel_axis, name='block14_sepconv1_bn')(x) x = layers.Activation('relu', name='block14_sepconv1_act')(x) x = layers.SeparableConv2D( 2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x) x = layers.BatchNormalization( axis=channel_axis, name='block14_sepconv2_bn')(x) x = layers.Activation('relu', name='block14_sepconv2_act')(x) if include_top: x = layers.GlobalAveragePooling2D(name='avg_pool')(x) imagenet_utils.validate_activation(classifier_activation, weights) x = layers.Dense(classes, activation=classifier_activation, name='predictions')(x) else: if pooling == 'avg': x = layers.GlobalAveragePooling2D()(x) elif pooling == 'max': x = layers.GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = layer_utils.get_source_inputs(input_tensor) else: inputs = img_input # Create model. model = training.Model(inputs, x, name='xception') # Load weights. 
if weights == 'imagenet': if include_top: weights_path = data_utils.get_file( 'xception_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models', file_hash='0a58e3b7378bc2990ea3b43d5981f1f6') else: weights_path = data_utils.get_file( 'xception_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='b0042744bf5b25fce3cb969f33bebb97') model.load_weights(weights_path) elif weights is not None: model.load_weights(weights) return model @keras_export('keras.applications.xception.preprocess_input') def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf') @keras_export('keras.applications.xception.decode_predictions') def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format( mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC) decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
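# Minimal usage sketch (an assumption; requires TensorFlow 2.x and a 299x299 RGB input in [0, 255]):
# import numpy as np
# model = Xception(weights='imagenet')
# x = preprocess_input(np.random.uniform(0, 255, (1, 299, 299, 3)))
# preds = model.predict(x)
# print(decode_predictions(preds, top=3))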
[ "noreply@github.com" ]
c964309085.noreply@github.com
b383fccab7ab2ceb965b3fa18a2dc8b17bf173ef
87829066b8152bb71450827b1d0bafe0cda3efc8
/collective/googleplus/interfaces/googlepluslayerlayer.py
70d3f46a983dfaeca00365474490523f53ab9ee4
[]
no_license
toutpt/collective.googleplus
fb7c53133bfb47d90dea202f8ae3207ac44a9409
fd8aa1ad11ecb08d8f056fd77c168067da0ea589
refs/heads/master
2021-01-25T08:49:05.841177
2011-10-21T08:33:45
2011-10-21T08:33:45
2,619,033
0
0
null
null
null
null
UTF-8
Python
false
false
203
py
from zope.interface import Interface # -*- Additional Imports Here -*- class IGooglePlusLayer(Interface): """ A layer specific to this product. Is registered using browserlayer.xml """
[ "toutpt@gmail.com" ]
toutpt@gmail.com
9b988be306bff1c995b75c4e2260d7650b7d77eb
3364a394529f7e6e176914457823e23c8fd763da
/api_test.py
b5ca67ad4d2215b05d0d2eab5df8e2b224b3123e
[]
no_license
wangzj-wzj/Pipeline_obj_detection
344d0adcba88aae716a8ef4ce471b89462e55be1
bedfe37bff64b24feec6c75ab551fd4be9ff71db
refs/heads/main
2023-05-07T12:25:26.181417
2021-05-24T03:06:19
2021-05-24T03:06:19
370,206,468
0
0
null
null
null
null
UTF-8
Python
false
false
931
py
import flask, json from flask import request import retinanet.predict ''' flask: a web framework; the @server.route() decorator it provides turns an ordinary function into a service. A login endpoint, for example, would take url, username, passwd. ''' # Create a service, treating this Python file as the service server = flask.Flask(__name__) # server.config['JSON_AS_ASCII'] = False # @server.route() turns an ordinary function into a service; this sets the endpoint's path and request methods @server.route('/img_detect', methods=['get', 'post']) def img_detect(): # Get the data passed as URL request parameters img = request.values.get('img_path') detect_results = retinanet.predict.predict(img) return json.dumps(detect_results, ensure_ascii=False) # serialize the dict to a JSON string if __name__ == '__main__': server.run(debug=True, port=8888, host='0.0.0.0') # set the port and host; 0.0.0.0 makes the server reachable from any IP on any network interface
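# Example request (an assumption; the server must be running locally and the image path is illustrative):
# curl "http://localhost:8888/img_detect?img_path=test.jpg"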
[ "noreply@github.com" ]
wangzj-wzj.noreply@github.com
5088979a6d1f6005908b26499cf407e91c77f10d
06c716e3c044159f117b4fdd0f1a96ee0eb97045
/src/leet/Wildcard Matching.py
50dfaf7bfcb5188bdc1e5931b3fc6b23633cd6f1
[]
no_license
sevenseablue/leetcode
9c3c41d4b02713afc32f13e7baf3bee007583d0f
2a29426be1d690b6f90bc45b437900deee46d832
refs/heads/master
2020-05-27T19:08:32.709254
2018-10-22T05:06:15
2018-10-22T05:06:15
23,282,399
0
0
null
null
null
null
UTF-8
Python
false
false
3,897
py
# -*- coding: utf-8 -*- """ __author__ = 'wangdawei' __time__ = '18-1-13 上午8:34' """ class Solution_dp: def isMatch(self, s, p): """ :type s: str :type p: str :rtype: bool """ memo = {} def dp(i, j): if (i, j) not in memo: # print("(%s, %s) not in memo" % (i, j)) if j == len(p): ans = i == len(s) else: # firstMatch = p[j] in {s[i], "?", "*"} iIn = i < len(s) if iIn and (p[j] == "?" or p[j] == s[i]): ans = dp(i + 1, j + 1) elif p[j] == "*": ans = dp(i, j + 1) or (iIn and dp(i + 1, j)) or (iIn and dp(i + 1, j + 1)) else: ans = False memo[(i, j)] = ans # print("memo[(%s, %s)]" % (i, j), memo[(i, j)]) return memo[(i, j)] return dp(0, 0) pass class Solution: def isMatch(self, s, p): """ :type s: str :type p: str :rtype: bool """ pi, si, last_si, lastpi = 0, 0, -1, -1 while si < len(s): if pi < len(p) and (s[si] == p[pi] or p[pi] == '?'): si += 1 pi += 1 elif pi < len(p) and p[pi] == '*': pi += 1 last_si = si lastpi = pi elif lastpi != -1: last_si += 1 si = last_si pi = lastpi else: return False while pi < len(p) and p[pi] == '*': pi += 1 return pi == len(p) def main(): solution = Solution() # solution.isMatch("aa", "a") # exit(-1) # solution.isMatch("aa", "aa") # solution.isMatch("aaa", "aa") # solution.isMatch("aa", "a*") # solution.isMatch("aa", ".*") # solution.isMatch("ab", ".*") # solution.isMatch("aab", "c*a*b") # assert (solution.isMatch("aa", "a") == False) # assert (solution.isMatch("aa", "aa") == True) # assert (solution.isMatch("aaa", "aa") == False) assert (solution.isMatch("", "*") == True) assert (solution.isMatch("", "*?") == False) assert (solution.isMatch("aa", "??") == True) assert (solution.isMatch("aa", "*") == True) assert (solution.isMatch("aa", "a*") == True) assert (solution.isMatch("ab", "?*") == True) assert (solution.isMatch("aab", "c*a*b") == False) exit() assert (solution.isMatch("a", "ab*a") == False) assert (solution.isMatch("ab", ".*..") == True) assert (solution.isMatch("aa", "a") == False) assert (solution.isMatch("aa", "aa") == True) assert (solution.isMatch("aaa", "aa") == False) assert (solution.isMatch("aa", "a*") == True) assert (solution.isMatch("", ".*") == True) assert (solution.isMatch("aa", ".*") == True) assert (solution.isMatch("ab", ".*") == True) assert (solution.isMatch("ab", ".*b.*") == True) assert (solution.isMatch("abab", ".*b.*") == True) assert (solution.isMatch("abab", ".*b.*b") == True) assert (solution.isMatch("abab", ".*ba*.*a*b") == True) assert (solution.isMatch("ababc", ".*ba*.*a*b") == False) assert (solution.isMatch("abab", ".*ba*.*a*") == True) assert (solution.isMatch("abab", ".*ba*.*a*c") == False) assert (solution.isMatch("abab", ".*ba*.*a*c.*") == False) assert (solution.isMatch("abab", ".*ba*.*a*b") == True) assert (solution.isMatch("", "a*a*a*") == True) assert (solution.isMatch("a", "a*a*a*") == True) assert (solution.isMatch("b", "a*b*a*") == True) assert (solution.isMatch("aab", "c*a*b") == True) assert (solution.isMatch("aab", "c*a*b") == True) assert (solution.isMatch("aab", "c*a*b") == True) if __name__ == "__main__": main()
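# Trace sketch for the two-pointer Solution (illustrative, not part of the original tests):
# isMatch("adceb", "*a*b") -> True. Each '*' records (last_si, lastpi); on a mismatch,
# si rewinds to last_si + 1, letting the most recent '*' absorb one more character.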
[ "sevenseablue@gmail.com" ]
sevenseablue@gmail.com
39d9721ee7b9442a04d1995e89ae4cd4309afd24
b0ce88d2168e76086f537cf440d0b55de5171c05
/service/db/models/greetings.py
00d528355fce3b7a7b5d66b3cacf7e0e4e8c06e7
[ "MIT" ]
permissive
alixedi/python-service-template
75cfde5b09c85d730b8bcc374b54c65f0eafbe80
0b3ae27a7b7d11ce80b8f5193b360f27f09334e8
refs/heads/master
2020-04-13T19:42:12.282588
2019-01-12T23:50:09
2019-01-12T23:50:09
163,410,488
1
0
null
null
null
null
UTF-8
Python
false
false
1,264
py
from sqlalchemy import Column, Integer, String from service.db.models import Base class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) hello = Column(String(100)) name = Column(String(100)) def __init__(self, hello=None, name=None): """ By all means override the constructor if only to set defaults. This is a contrived example. """ self.hello = hello or 'Hello' self.name = name or 'World' @property def greeting(self): """ This is an example of defining a property instead of a column when the result can be easily and cheaply computed and doesn't need to be stored. """ return f'{self.hello} {self.name}!' @staticmethod def get_users_with_name(session, name): """ This is an example of how to define a query using staticmethod to use the model class as a "namespace" only. """ return session.query(User).filter(User.name==name) @staticmethod def get_user(session, user_id): """ Get the user with the given user_id. """ return session.query(User).get(user_id) def __str__(self): return self.greeting
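# Minimal usage sketch (an assumption; `Session` is a configured sessionmaker bound to this Base):
# session = Session()
# session.add(User(hello='Hi', name='Ada'))
# session.commit()
# print(User.get_users_with_name(session, 'Ada').first())  # -> "Hi Ada!"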
[ "alixedi@gmail.com" ]
alixedi@gmail.com
b2d5cde1f50f5d1b45f3c0215213b71949c690c7
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
/Configurations/VBSjjlnu/Full2018v6s5/conf_wbindeta/plot_postfit.py
5cb2d299a7f604ea098d5ad1b1439eed5d2c29c3
[]
no_license
latinos/PlotsConfigurations
6d88a5ad828dde4a7f45c68765081ed182fcda21
02417839021e2112e740607b0fb78e09b58c930f
refs/heads/master
2023-08-18T20:39:31.954943
2023-08-18T09:23:34
2023-08-18T09:23:34
39,819,875
10
63
null
2023-08-10T14:08:04
2015-07-28T07:36:50
Python
UTF-8
Python
false
false
5,003
py
# plot configuration from ROOT import TColor # groupPlot = {} # # Groups of samples to improve the plots. # If not defined, normal plots is used colors = { # https://root.cern.ch/doc/master/classTColor.html#C02 'kWhite' : 0, 'kBlack' : 1, 'kGray' : 920, 'kRed' : 632, 'kGreen' : 416, 'kBlue' : 600, 'kYellow' : 400, 'kMagenta' : 616, 'kCyan' : 432, 'kOrange' : 800, 'kSpring' : 820, 'kTeal' : 840, 'kAzure' : 860, 'kViolet' : 880, 'kPink' : 900, } palette = { "Orange": (242, 108, 13), #f26c0d "Yellow": (247, 195, 7), #f7c307 "LightBlue": (153, 204, 255), #99ccff "MediumBlue": (72, 145, 234), #4891ea "MediumBlue2": (56, 145, 224), #3891e0 "DarkBlue": (8, 103, 136), #086788 "Green": (47, 181, 85), #2fb555 "Green2": (55, 183, 76), #37b74c "LightGreen" : (82, 221, 135), #52dd87 "Violet": (242, 67, 114), #f24372 "Wjets_deta5": (247, 155, 7),#f79b07 "Wjets_deta4": (247, 175, 7), #f7af07 "Wjets_deta3": (247, 195, 7), #f7c307 "Wjets_deta2": (247, 215, 7), #f7d707 "Wjets_deta1": (247, 235, 7), #f7eb07 } groupPlot['Fake'] = { 'nameHR' : "Fake", 'isSignal' : 0, 'color': palette["LightBlue"], 'samples' : ['Fake'], 'fill': 1001 } groupPlot['vbfV+VV+VVV'] = { 'nameHR' : 'vbfV+VV+VVV', 'isSignal' : 0, 'color': palette["MediumBlue2"], 'samples' : ['VBF-F','VVV', 'VV'], 'fill': 1001 } groupPlot['DY'] = { 'nameHR' : "DY", 'isSignal' : 0, 'color': palette["Green2"], 'samples' : ['DY'], 'fill': 1001 } groupPlot['top'] = { 'nameHR' : 'top', 'isSignal' : 0, 'color': palette["Orange"], 'samples' : ['top'], 'fill': 1001 } detabins = ["Wjets_deta{}".format(i) for i in range(1,6)] for ibin, detabin in enumerate(detabins): groupPlot[detabin] = { 'nameHR' : 'W+Jets_{}'.format(ibin), 'isSignal' : 0, 'color': palette[detabin], 'samples' : [detabin], 'fill': 1001 } groupPlot['VBS'] = { 'nameHR' : 'VBS', 'isSignal' : 1, 'color': colors["kRed"]+1, 'samples' : ['VBS'], 'fill': 1001 } groupPlot['total_prefit'] = { 'nameHR' : 'pre-fit', 'isSignal' : 2, 'color': 616, 'samples' : ['total_prefit'], 'fill': 1001 } #plot = {} # keys here must match keys in samples.py # plot['VVV'] = { 'color': colors["kAzure"] -3, 'isSignal' : 0, 'isData' : 0, 'scale' : 1.0 } plot['VV'] = { 'color': colors['kGreen']+3, 'isSignal' : 0, 'isData' : 0, 'scale' : 1. , } plot['DY'] = { 'color': colors['kMagenta']+1, 'isSignal' : 0, 'isData' : 0, 'scale' : 1.0, } plot['VBF-V'] = { 'color': colors['kYellow']+3, 'isSignal' : 0, 'isData' : 0, 'scale' : 1. , } plot['Fake'] = { 'color': colors['kTeal'], 'isSignal' : 0, 'isData' : 0, 'scale' : 1.0, } plot['top'] = { 'color': colors['kAzure']-1, 'isSignal' : 0, 'isData' : 0, 'scale' : 1.0, } for detabin in detabins: plot[detabin] = { 'color': colors['kRed']-3, 'isSignal' : 0, 'isData' : 0, 'scale' : 1.0 , } # plot['Wjets'] = { # # } plot['VBS'] = { 'color': colors["kCyan"]+1, 'isSignal' : 1, 'isData' : 0, 'scale' : 1. , } # # data plot['DATA'] = { 'nameHR' : 'Data', 'color': 1 , 'isSignal' : 0, 'isData' : 1 , 'isBlind' : 0 } plot['total_prefit'] = { 'nameHR' : 'pre-fit', 'color': 616, 'isSignal' : 2, 'isData' : 0, 'scale' : 1. , } # additional options legend['lumi'] = 'L = 59.74/fb' legend['sqrt'] = '#sqrt{s} = 13 TeV'
[ "davide.valsecchi@cern.ch" ]
davide.valsecchi@cern.ch
f64d93bbff877b12f837b3df1091f51081a263e3
6fb6c9e0795ebf8dd3a976a95752f8f16f2c0036
/Decision and Flow Control.py
34991a5cda6fffc69432f63c0ba6b88509318329
[]
no_license
RajatOberoi/Decision-control-and-loops
f066a281cd7fe241f398ee22b6fa4b2dc26590a0
e6d7682288514c1131d788335dd073dca1bbd67c
refs/heads/master
2020-03-27T00:10:07.728359
2018-08-21T17:59:57
2018-08-21T17:59:57
145,600,408
0
0
null
null
null
null
UTF-8
Python
false
false
3,490
py
#Ques1 x=int(input("enter any year")) if x%400==0: print("leap year",x) elif x%100==0: print("not a leap year",x) elif x%4==0: print("leap year",x) else: print("not a leap year") #Ques2 a=int(input("enter length")) b=int(input("enter breadth")) if a==b: print("dimensions are of square") else: print("dimensions are of rectangle") #Ques3 z=int(input("enter age of first person")) c=int(input("enter age of second person")) v=int(input("enter age of third person")) if z>c and z>v: print("first person is the oldest") if c>v: print("third person is the youngest") else: print("second person is the youngest") elif c>z and c>v: print("second person is the oldest") if z>v: print("third person is the youngest") else: print("first person is the youngest") else: print("third person is the oldest") if z>c: print("second person is the youngest") else: print("first person is the youngest") #Ques4 q=int(input("enter age of person")) w=input("enter sex as M for Male and F for female") e=input("enter Y if married and N for not") if w=="F": if e=="Y": print("person is female and will only work in urban areas and is married and her age is",q) else: print("person is female and will only work in urban areas and is not married and her age is",q) elif w=="M": if q>=20 and q<40: if e=="Y": print("person is male and can work anywhere and the person is married and his age is",q) else: print("person is male and can work anywhere and the person is not married and his age is",q) elif q>=40 and q<=60: if e=="Y": print("person is male and will only work in urban areas and is married and his age is",q) else: print("person is male and will only work in urban areas and is not married and his age is",q) else: print("ERROR") #Ques5 r=int(input("enter no of items you want to purchase")) l=r*100 if l>1000: l=l-(l*0.1) print("final cost",l) else: print("final cost",l) #************************************LOOPS*********************************************************************************** #Ques1 for i in range(10): x=int(input("enter no")) print(x) #Ques2 while True: print("infinite loop") #Ques3 h=[] y=[] o=int(input("enter no of element in list")) for i in range(o): n=int(input("enter element in the list")) h.append(n) y.append(n*n) print(h) print(y) #Ques4 r=[] u=[] p=[] k=[5,5.2,6.3,44,"abcde","fgh","jkl",10] print(k) for i in k: if isinstance(i,int): r.append(i) elif isinstance(i,float): u.append(i) elif isinstance(i,str): p.append(i) print(r) print(u) print(p) #Ques5 prime=[] for num in range(1,101): if num>1: for i in range(2,num): if (num % i) == 0: break else: prime.append(num) print(prime) #Ques6 sy="*" for i in range(5): print(i*sy) #Ques7 d=[] f=int(input("enter no of element in the list")) for i in range(f): t=int(input("enter no")) d.append(t) print(d) j=int(input("enter element you want to delete from list")) for i in d: if i==j: d.remove(i) print(d)
[ "noreply@github.com" ]
RajatOberoi.noreply@github.com
acb8b42f4644ad80964e2c74884fec5737905aa5
2f635f5eee1c01b66a08c8b532d26f5f657159eb
/lib7z.py
642fcbc814629be121a308e121e480ffc8b6af69
[]
no_license
supplient/ComicExtracter
e7c4df25b8d5ce54b7b94a42a192e7307da645d8
c37ed3e93f01cb811f9a458e541faddbcbba6beb
refs/heads/master
2022-11-08T01:45:21.242812
2020-06-21T08:25:55
2020-06-21T08:25:55
268,029,082
0
0
null
null
null
null
UTF-8
Python
false
false
965
py
import subprocess def extract(path, dest_dir, pwd): args = ["7z.exe", "x", path, "-p"+pwd, "-o"+dest_dir] p = subprocess.Popen( args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = p.communicate("S".encode("utf8")) stdout = stdout.decode("gbk") stderr = stderr.decode("gbk") if "ERROR" in stderr: return False return True if __name__ == "__main__": p = subprocess.Popen( ["7z.exe", "x", "test/c.7z", "-pgmgard.com", "-otest/oo"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = p.communicate("S".encode("utf8")) stdout = stdout.decode("gbk") stderr = stderr.decode("gbk") print(stdout) print(stderr) if "ERRORS" in stderr: print("Failed") elif "Ok" in stdout: print("Succeed")
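# Minimal usage sketch (an assumption; 7z.exe must be on PATH, paths and password are illustrative):
# ok = extract("archive.7z", "out_dir", "password")
# print("Succeed" if ok else "Failed")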
[ "437566830@qq.com" ]
437566830@qq.com
f084847e39dc4e454e7424936757f85acbfde69a
4f6f675fbe3c84e56de0d45b498e0d828bb5927b
/face_morph/triangulation.py
8b9b65c0a0c2ab83e3346ad97718e6c0525d91e9
[]
no_license
kevinkinking/face_morph
909bd7b1a4b73aa05662ae94fb5dc1c4afa47665
4c4d07bc15d58ee3631bf4086c5210728c10548b
refs/heads/master
2020-03-18T11:40:21.988136
2018-05-25T07:28:29
2018-05-25T07:28:29
134,685,101
0
0
null
null
null
null
UTF-8
Python
false
false
5,146
py
import cv2
import numpy as np

FACE_POINTS = list(range(0, 68))
JAW_POINTS = list(range(0, 17))
LEFT_EYE_POINTS = list(range(36, 42))
LEFT_BROW_POINTS = list(range(17, 22))
MOUTH_POINTS = list(range(48, 68))
NOSE_POINTS = list(range(27, 36))
RIGHT_EYE_POINTS = list(range(42, 48))
RIGHT_BROW_POINTS = list(range(22, 27))
LEFT_FACE = list(range(0, 9)) + list(range(17, 22))
RIGHT_FACE = list(range(8, 17)) + list(range(22, 27))
JAW_END = 17
FACE_START = 0
FACE_END = 68

OVERLAY_POINTS = [
    LEFT_FACE,
    RIGHT_FACE,
    JAW_POINTS,
]


def draw_point(img, p, color):
    cv2.circle(img, (p[0], p[1]), 2, color, cv2.FILLED, cv2.LINE_AA, 0)


def matrix_rectangle(left, top, width, height):
    pointer = [
        (left, top),
        (left + width / 2, top),
        (left + width - 1, top),
        (left + width - 1, top + height / 2),
        (left, top + height / 2),
        (left, top + height - 1),
        (left + width / 2, top + height - 1),
        (left + width - 1, top + height - 1)
    ]
    return pointer


def matrix_rectangle1(left, top, width, height):
    pointer = [
        (left, top),
        (left + width / 2, top),
        (left + width - 1, top),
        (left + width - 1, top + height / 10),
        (left, top + height / 10),
        (left, top + height - 1),
        (left + width / 2, top + height - 1),
        (left + width - 1, top + height - 1)
    ]
    return pointer


def rect_contains(rect, point):
    if point[0] < rect[0]:
        return False
    elif point[1] < rect[1]:
        return False
    elif point[0] > rect[2]:
        return False
    elif point[1] > rect[3]:
        return False
    return True


def measure_triangle(image, points):
    # Delaunay-triangulate the landmark points, then map each triangle's
    # vertices back to indices into the original points list.
    rect = (0, 0, image.shape[1], image.shape[0])
    sub_div = cv2.Subdiv2D(rect)

    for p in points:
        sub_div.insert(p)

    triangle_list = sub_div.getTriangleList()

    triangle = []
    pt = []

    for t in triangle_list:
        pt.append((t[0], t[1]))
        pt.append((t[2], t[3]))
        pt.append((t[4], t[5]))

        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])

        if rect_contains(rect, pt1) and rect_contains(rect, pt2) and rect_contains(rect, pt3):
            ind = []
            for j in range(0, 3):
                for k in range(0, len(points)):
                    if abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0:
                        ind.append(k)
            if len(ind) == 3:
                triangle.append((ind[0], ind[1], ind[2]))

        pt = []

    return triangle


def morph_triangle(src, dst, img, t_src, t_dst, t, alpha):
    # Warp the matching triangles from both images into the intermediate
    # triangle t, then alpha-blend them inside the triangle mask.
    r1 = cv2.boundingRect(np.float32([t_src]))
    r2 = cv2.boundingRect(np.float32([t_dst]))
    r = cv2.boundingRect(np.float32([t]))

    t1_rect = []
    t2_rect = []
    t_rect = []

    for i in range(0, 3):
        t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
        t1_rect.append(((t_src[i][0] - r1[0]), (t_src[i][1] - r1[1])))
        t2_rect.append(((t_dst[i][0] - r2[0]), (t_dst[i][1] - r2[1])))

    mask = np.zeros((r[3], r[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t_rect), (1.0, 1.0, 1.0), 16, 0)

    img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    img2_rect = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]

    size = (r[2], r[3])

    warp_img1 = affine_transform(img1_rect, t1_rect, t_rect, size)
    warp_img2 = affine_transform(img2_rect, t2_rect, t_rect, size)

    # k_size = (3, 3)
    # warp_img1 = cv2.blur(warp_img1, k_size, (r[2]/2, r[3]/2))
    # warp_img2 = cv2.blur(warp_img2, k_size, (r[2]/2, r[3]/2))

    img_rect = (1.0 - alpha) * warp_img1 + alpha * warp_img2

    img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + img_rect * mask


def affine_triangle(src, dst, t_src, t_dst):
    # Warp a single triangle from src into dst in place.
    r1 = cv2.boundingRect(np.float32([t_src]))
    r2 = cv2.boundingRect(np.float32([t_dst]))

    t1_rect = []
    t2_rect = []
    t2_rect_int = []

    for i in range(0, 3):
        t1_rect.append((t_src[i][0] - r1[0], t_src[i][1] - r1[1]))
        t2_rect.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))
        t2_rect_int.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))

    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0), 16, 0)

    img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])

    img2_rect = affine_transform(img1_rect, t1_rect, t2_rect, size)
    img2_rect = img2_rect * mask

    dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * (
        (1.0, 1.0, 1.0) - mask)
    dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2_rect


def affine_transform(src, src_tri, dst_tri, size):
    warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))

    dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]), None,
                         flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)

    return dst
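# --- Hedged usage sketch (not part of the original file) ---
# Shows how the helpers above compose for a 50/50 face morph. `src_points`
# and `dst_points` are hypothetical 68-point landmark lists (e.g. from dlib)
# for two same-sized float32 images; kept commented out because the data is
# assumed rather than provided here.
#
# src = cv2.imread('a.jpg').astype(np.float32)
# dst = cv2.imread('b.jpg').astype(np.float32)
# morphed = np.zeros(src.shape, dtype=src.dtype)
# alpha = 0.5
# avg_points = [((1 - alpha) * x1 + alpha * x2, (1 - alpha) * y1 + alpha * y2)
#               for (x1, y1), (x2, y2) in zip(src_points, dst_points)]
# for (i, j, k) in measure_triangle(src, avg_points):
#     morph_triangle(src, dst, morphed,
#                    [src_points[i], src_points[j], src_points[k]],
#                    [dst_points[i], dst_points[j], dst_points[k]],
#                    [avg_points[i], avg_points[j], avg_points[k]],
#                    alpha)
# cv2.imwrite('morphed.jpg', morphed)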
[ "kevin.works@qq.com" ]
kevin.works@qq.com
25b3360e42ef54932355d67e8eab36a968f5d8f0
6d9fb9a2e0890df7f79a7d2190a7e61d2f06340e
/AX.py
2b04a3e0ba4394828a0d0b48579f9bb4b6d37194
[]
no_license
d-roma/pae_sim
21f4de7496de7f7ef3b438ee0e35af3d036b2ba1
7b1ed71fb2d93cd7b93309b3ec69df61272ff97c
refs/heads/main
2023-03-19T01:27:24.828907
2021-03-18T16:15:15
2021-03-18T16:15:15
346,449,008
0
0
null
null
null
null
UTF-8
Python
false
false
4627
py
# !/usr/bin/python3
# -*- coding: utf-8 -*-

"""
AX12 emulator
"""

from enum import Enum
from collections import OrderedDict


class AX_instruction(Enum):
    IDLE = 0x00
    PING = 0x01
    READ = 0x02
    WRITE = 0x03
    REG_WRITE = 0x04
    ACTION = 0x05
    RESET = 0x06
    SYNC_WRITE = 0x83
    END = 0xFF


class AX_registers(Enum):
    MODEL_NUMBER_L = 0
    MODEL_NUMBER_H = 1
    FIRMWARE_VERSION = 2
    ID = 3
    BAUD_RATE = 0x04
    RET_DELAY_TIME = 0x05
    CW_ANGLE_LIMIT_L = 0x06
    CW_ANGLE_LIMIT_H = 0x07
    CCW_ANGLE_LIMIT_L = 0x08
    CCW_ANGLE_LIMIT_H = 0x09
    RESERVED__0x0A = 0x0A
    HIGH_TEMP_LIMIT = 0x0B
    LOW_VOLTAGE_LIMIT = 0x0C
    HIGH_VOLTAGE_LIMIT = 0x0D
    MAX_TORQUE_L = 0x0E
    MAX_TORQUE_H = 0x0F
    STATUS_RET = 0x10
    ALARM_LED = 0x11
    ALARM_SHUTDWN = 0x12
    RESERVED__0x13 = 0x13
    DWN_CAL_L = 0x14
    DWN_CAL_H = 0x15
    UP_CAL_L = 0x16
    UP_CAL_H = 0x17
    TORQUE_ENABLED = 0x18
    LED = 0x19
    IR_LEFT = 0x1A
    IR_CENTER = 0x1B
    IR_RIGHT = 0x1C
    CCW_COMP_SLOPE = 0x1D
    GOAL_POS_L = 0x1E
    GOAL_POS_H = 0x1F
    GOAL_SPEED_L = 0x20
    GOAL_SPEED_H = 0x21
    TORQUE_LIMIT_L = 0x22
    TORQUE_LIMIT_H = 0x23
    PRESENT_POS_L = 0x24
    PRESENT_POS_H = 0x25
    PRESENT_SPEED_L = 0x26
    PRESENT_SPEED_H = 0x27
    PRESENT_LOAD_L = 0x28
    PRESENT_LOAD_H = 0x29
    PRESENT_VOLTAGE = 0x2A
    PRESENT_TEMP = 0x2B
    REGISTERED_INSTR = 0x2C
    ADC_VALUE = 0x2D
    MOVING = 0x2E
    LOCK = 0x2F
    PUNCH_L = 0x30
    PUNCH_H = 0x31


AX12_reset_memory = {
    # Memory position: (Reset value, description)
    AX_registers.MODEL_NUMBER_L: (0x0C, "Model Number(L)"),
    AX_registers.MODEL_NUMBER_H: (0x00, "Model Number(H)"),
    AX_registers.FIRMWARE_VERSION: (0x01, "Firmware Version"),
    AX_registers.ID: (0, "ID",),
    AX_registers.BAUD_RATE: (0x01, "Baud Rate",),
    AX_registers.RET_DELAY_TIME: (0xFA, "Return Delay Time",),
    AX_registers.CW_ANGLE_LIMIT_L: (0, "CW Angle Limit(L)",),
    AX_registers.CW_ANGLE_LIMIT_H: (0, "CW Angle Limit(H)",),
    AX_registers.CCW_ANGLE_LIMIT_L: (0xFF, "CCW Angle Limit(L)",),
    AX_registers.CCW_ANGLE_LIMIT_H: (0x03, "CCW Angle Limit(H)",),
    AX_registers.RESERVED__0x0A: (0, "Reserved",),
    AX_registers.HIGH_TEMP_LIMIT: (0x55, "High Temp. Limit",),
    AX_registers.LOW_VOLTAGE_LIMIT: (0x3C, "Low Voltage Limit",),
    AX_registers.HIGH_VOLTAGE_LIMIT: (0xBE, "High Voltage Limit",),
    AX_registers.MAX_TORQUE_L: (0xFF, "Max Torque(L)",),
    AX_registers.MAX_TORQUE_H: (0x03, "Max Torque(H)",),
    AX_registers.STATUS_RET: (0x02, "Status Return Level",),
    AX_registers.ALARM_LED: (0x04, "Alarm LED",),
    AX_registers.ALARM_SHUTDWN: (0x04, "Alarm Shutdown",),
    AX_registers.RESERVED__0x13: (0, "Reserved",),
    AX_registers.DWN_CAL_L: (0, "Down Calibration(L)",),
    AX_registers.DWN_CAL_H: (0, "Down Calibration(H)",),
    AX_registers.UP_CAL_L: (0, "Up Calibration(L)",),
    AX_registers.UP_CAL_H: (0, "Up Calibration(H)",),
    AX_registers.TORQUE_ENABLED: (0, "Torque Enable",),
    AX_registers.LED: (0, "LED",),
    AX_registers.IR_LEFT: (0x7F, "Left IR",),
    AX_registers.IR_CENTER: (0x7F, "Center IR",),
    AX_registers.IR_RIGHT: (0x00, "Right IR",),
    AX_registers.CCW_COMP_SLOPE: (0, "CCW Compliance Slope",),
    AX_registers.GOAL_POS_L: (0, "Goal Position(L)",),
    AX_registers.GOAL_POS_H: (0, "Goal Position(H)",),
    AX_registers.GOAL_SPEED_L: (0, "Moving Speed(L)",),
    AX_registers.GOAL_SPEED_H: (0, "Moving Speed(H)",),
    AX_registers.TORQUE_LIMIT_L: (0x0E, "Torque Limit(L)",),
    AX_registers.TORQUE_LIMIT_H: (0x0F, "Torque Limit(H)",),
    AX_registers.PRESENT_POS_L: (0, "Present Position(L)",),
    AX_registers.PRESENT_POS_H: (0, "Present Position(H)",),
    AX_registers.PRESENT_SPEED_L: (0, "Present Speed(L)",),
    AX_registers.PRESENT_SPEED_H: (0, "Present Speed(H)",),
    AX_registers.PRESENT_LOAD_L: (0, "Present Load(L)",),
    AX_registers.PRESENT_LOAD_H: (0, "Present Load(H)",),
    AX_registers.PRESENT_VOLTAGE: (0, "Present Voltage",),
    AX_registers.PRESENT_TEMP: (0, "Present Temperature",),
    AX_registers.REGISTERED_INSTR: (0, "Registered Instruction",),
    AX_registers.ADC_VALUE: (0, "ADC_value",),
    AX_registers.MOVING: (0, "Moving",),
    AX_registers.LOCK: (0, "Lock",),
    AX_registers.PUNCH_L: (0, "Punch(L)",),
    AX_registers.PUNCH_H: (0, "Punch(H)"),
}


class AX(OrderedDict):
    def __init__(self, id):
        super().__init__()  # initialise the underlying OrderedDict before filling it
        self.id = id
        self.reset()

    def reset(self):
        # Restore every register to its power-on default, keeping this servo's ID.
        for key, value in AX12_reset_memory.items():
            self[key] = value[0]
        self[AX_registers.ID] = self.id
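# --- Hedged usage sketch (not part of the original file) ---
# An AX instance behaves as an ordered register map keyed by AX_registers;
# the demo below is an assumption about intended use, not part of the emulator.
if __name__ == '__main__':
    servo = AX(id=3)
    assert servo[AX_registers.ID] == 3
    servo[AX_registers.GOAL_POS_L] = 0xFF    # write the goal-position low byte
    servo[AX_registers.GOAL_POS_H] = 0x01    # write the goal-position high byte
    goal = servo[AX_registers.GOAL_POS_L] | (servo[AX_registers.GOAL_POS_H] << 8)
    print(hex(goal))                         # -> 0x1ff
    servo.reset()                            # back to power-on defaults, ID preserved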
[ "roma@ieec.cat" ]
roma@ieec.cat
80a914b5a51c11c1fe0423656c0e64de497a65e0
eecf60acbadd375a1b65b7c69af5bc609ac0dfa6
/extract.py
25eadddc36fa4e8a76f90d5d90565ad83454fbb9
[]
no_license
zZhouzhiYing/MyObjectDetection
7ded6c1986d5f2ae4e84f5094792526d07fb5c45
01ee79202e79d795316dd8112ca18f796b5c1f17
refs/heads/master
2023-02-20T16:36:43.257141
2021-01-19T09:03:29
2021-01-19T09:03:29
327,833,538
0
0
null
null
null
null
UTF-8
Python
false
false
3050
py
import cv2
import numpy as np
import torch
from torch.autograd import Variable
from torchvision import models
import torchvision.transforms as transforms


def preprocess_image(cv2im, resize_im=True):
    """
        Processes image for CNNs

    Args:
        cv2im (ndarray): Image to process (BGR array as returned by cv2.imread)
        resize_im (bool): Resize to 224 or not
    returns:
        im_as_var (Pytorch variable): Variable that contains processed float tensor
    """
    # mean and std list for channels (Imagenet)
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    # Resize image
    if resize_im:
        cv2im = cv2.resize(cv2im, (224, 224))
    # im_as_arr = np.float32(cv2im)
    # im_as_arr = np.ascontiguousarray(im_as_arr[..., ::-1])
    # im_as_arr = im_as_arr.transpose(2, 0, 1)  # Convert array to D,W,H
    # # Normalize the channels
    # for channel, _ in enumerate(im_as_arr):
    #     im_as_arr[channel] /= 255
    #     im_as_arr[channel] -= mean[channel]
    #     im_as_arr[channel] /= std[channel]
    # # Convert to float tensor
    # im_as_ten = torch.from_numpy(im_as_arr).float()
    # # Add one more channel to the beginning. Tensor shape = 1,3,224,224
    # im_as_ten.unsqueeze_(0)
    # # Convert to Pytorch variable
    # im_as_var = Variable(im_as_ten, requires_grad=True)
    return cv2im


class FeatureVisualization():
    def __init__(self, img_path, selected_layer):
        self.img_path = img_path
        self.selected_layer = selected_layer
        self.pretrained_model = models.vgg16(pretrained=True).features

    def process_image(self):
        img = cv2.imread(self.img_path)
        img = preprocess_image(img)
        return img

    def get_feature(self):
        # NOTE: the VGG16 layers expect a 1x3x224x224 tensor; re-enable the
        # commented-out normalisation in preprocess_image before calling this,
        # since a raw HxWx3 numpy array will not pass through nn layers.
        # input = Variable(torch.randn(1, 3, 224, 224))
        input = self.process_image()
        print(input.shape)
        x = input
        for index, layer in enumerate(self.pretrained_model):
            x = layer(x)
            if (index == self.selected_layer):
                return x

    def get_single_feature(self):
        features = self.get_feature()
        print(features.shape)

        feature = features[:, 0, :, :]
        print(feature.shape)

        feature = feature.view(feature.shape[1], feature.shape[2])
        print(feature.shape)

        return feature

    def save_feature_to_img(self):
        # to numpy
        feature = self.get_single_feature()
        feature = feature.data.numpy()

        # use sigmoid to map to [0,1]
        feature = 1.0 / (1 + np.exp(-1 * feature))

        # to [0,255]
        feature = np.round(feature * 255)
        print(feature[0])

        cv2.imwrite('./img.jpg', feature)


if __name__ == '__main__':
    # get class
    # myClass = FeatureVisualization('bus.jpg', 5)
    # print(myClass.pretrained_model)
    # myClass.save_feature_to_img()
    img = cv2.imread('bus.jpg')
    print(img.shape)
    # cv2.imshow('img', img)
    # cv2.waitKey(0)
    cv2im = preprocess_image(img)
    print(cv2im.shape)
    im_as_arr = np.float32(cv2im)
    im_as_arr = np.ascontiguousarray(im_as_arr[..., ::-1])
    print(im_as_arr.shape)
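# --- Hedged usage sketch (not part of the original file) ---
# Intended call pattern for the class above; assumes 'bus.jpg' exists and
# that the commented-out normalisation in preprocess_image is re-enabled so
# a 1x3x224x224 tensor reaches the VGG16 layers.
#
# vis = FeatureVisualization('bus.jpg', selected_layer=5)
# vis.save_feature_to_img()   # writes ./img.jpg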
[ "zzyzhouzhiying@outlook.com" ]
zzyzhouzhiying@outlook.com
f010fa48df7b2cb3eb4b31a07514690f9eca0a5a
0d9c8d1c28f84ed29512f331548595b947b3376f
/dev/views.py
733f49ccd0f63521c2863e2c641de84eb19193fa
[ "MIT" ]
permissive
Gerard-007/musicalpacks
88c2c4dc19201bc2633f96663e1ab2d441f63afc
ef12c7281b395268ac53247fd34c3499f7a0569a
refs/heads/master
2022-12-14T14:42:01.458533
2019-06-15T21:06:04
2019-06-15T21:06:04
181,792,220
0
0
MIT
2022-12-08T02:48:41
2019-04-17T01:12:31
CSS
UTF-8
Python
false
false
133
py
from django.http import HttpResponse from django.shortcuts import render def home(request): return render(request, 'home.html')
[ "Danchuksm@gmail.com" ]
Danchuksm@gmail.com
3a2b6efff4a7164d8281dce499ebf522a7a6164b
2df0b1b7160cd58f02190b86ae245f2f5a9d2232
/svm/dataset.py
bbaf14a03498e226d98ef6b9a4b5d7183cc3d56f
[ "Apache-2.0" ]
permissive
gareth1brown/hainesfork
1e3085f65494bc80b0516862555b1e333d81b11d
bcddc22c2a97050d8e7a07021097388d447cf914
refs/heads/master
2021-01-21T00:16:24.943605
2013-10-06T22:39:12
2013-10-06T22:39:12
13,342,607
1
0
null
null
null
null
UTF-8
Python
false
false
4362
py
# -*- coding: utf-8 -*-

# Copyright 2010 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import numpy
import operator


class Dataset:
    """Contains a dataset - lots of pairs of feature vectors and labels. For convenience labels can be arbitrary python objects, or at least python objects that work for indexing a dictionary."""

    def __init__(self):
        # labels are internally stored as consecutive integers - this does the conversion...
        self.labelToNum = dict()
        self.numToLabel = []

        # Store of data blocks - each block is a data matrix and label list pair (A lot of blocks could be of length one of course.)...
        self.blocks = []

    def add(self, featVect, label):
        """Adds a single feature vector and label."""
        if label in self.labelToNum:
            l = self.labelToNum[label]
        else:
            l = len(self.numToLabel)
            self.numToLabel.append(label)
            self.labelToNum[label] = l

        self.blocks.append((featVect.reshape((1, featVect.shape[0])).astype(numpy.double), [l]))

    def addMatrix(self, dataMatrix, labels):
        """This adds a data matrix alongside a list of labels for it. The number of rows in the matrix should match the number of labels in the list."""
        assert(dataMatrix.shape[0] == len(labels))

        # Add any labels not yet seen...
        for l in labels:
            if l not in self.labelToNum:
                num = len(self.numToLabel)
                self.numToLabel.append(l)
                self.labelToNum[l] = num

        # Convert the given labels list to a list of numerical labels...
        # (wrapped in list() so the labels survive repeated iteration under Python 3)
        ls = list(map(lambda l: self.labelToNum[l], labels))

        # Store...
        self.blocks.append((dataMatrix.astype(numpy.double), ls))

    def getLabels(self):
        """Returns a list of all the labels in the data set."""
        return self.numToLabel

    def getCounts(self):
        """Returns how many features with each label have been seen - as a list which aligns with the output of getLabels."""
        ret = [0] * len(self.numToLabel)
        for block in self.blocks:
            for label in block[1]:
                ret[label] += 1
        return ret

    def subsampleData(self, count):
        """Returns a new dataset object which contains count instances of the data, sampled from the data contained within without repetition. Returned Dataset could miss some of the classes."""
        size = 0
        for block in self.blocks:
            size += len(block[1])

        subset = numpy.random.permutation(size)[:count]
        subset.sort()

        pos = 0
        index = 0
        ret = Dataset()
        for block in self.blocks:
            while subset[index] < (pos + len(block[1])):
                loc = subset[index] - pos
                ret.add(block[0][loc, :], block[1][loc])
                index += 1
                if index == subset.shape[0]:
                    return ret
            pos += len(block[1])

        return ret

    def getTrainData(self, lNeg, lPos):
        """Given two labels this returns a pair of a data matrix and a y vector, where lPos features have +1 and lNeg features have -1. Features that do not have one of these two labels will not be included."""
        # Convert the given labels to label numbers...
        if lNeg in self.labelToNum:
            ln = self.labelToNum[lNeg]
        else:
            ln = -1
        if lPos in self.labelToNum:
            lp = self.labelToNum[lPos]
        else:
            lp = -1

        # Go through the blocks and extract the relevant info...
        dataList = []
        yList = []
        for dataMatrix, labels in self.blocks:
            y = list(filter(lambda l: l == lp or l == ln, labels))
            if len(y) != 0:
                def signRes(l):
                    if l == lp:
                        return 1.0
                    else:
                        return -1.0
                # numpy.float64 rather than numpy.float_ (removed in NumPy 2.0)
                y = numpy.array(list(map(signRes, y)), dtype=numpy.float64)

                inds = list(map(operator.itemgetter(0),
                                filter(lambda l: l[1] == lp or l[1] == ln, enumerate(labels))))
                data = dataMatrix[numpy.array(inds), :]

                dataList.append(data)
                yList.append(y)

        # Glue it all together into big blocks, and return 'em...
        return (numpy.vstack(dataList), numpy.concatenate(yList))
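# --- Hedged usage sketch (not part of the original file) ---
# Toy data only; shows the intended call pattern of the class above.
if __name__ == '__main__':
    ds = Dataset()
    ds.addMatrix(numpy.random.randn(6, 4), ['cat', 'dog', 'cat', 'dog', 'cat', 'dog'])
    ds.add(numpy.zeros(4), 'cat')
    print(ds.getLabels())                    # ['cat', 'dog']
    print(ds.getCounts())                    # [4, 3]
    data, y = ds.getTrainData('cat', 'dog')  # y: +1 for 'dog', -1 for 'cat'
    print(data.shape, y.shape)               # (7, 4) (7,)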
[ "gareth1.brown@googlemail.com" ]
gareth1.brown@googlemail.com
69e814af9a82875996e817500b77e867a121fbbe
ba580e5d29352f60ad02b271c121e24109b08229
/spconv/pytorch/quantization/intrinsic/quantized/__init__.py
00a9c9d79c8d54c814c44d7ce7b5bcbafc2800ad
[ "Apache-2.0" ]
permissive
traveller59/spconv
2fde12075d80b7965b0196e32e7f0585fe0f1392
125a194d895b1bc3ad6ff907bc72641548397b32
refs/heads/master
2023-09-01T15:48:18.443364
2023-04-19T14:58:31
2023-04-19T14:58:31
166,499,614
1,499
382
Apache-2.0
2023-08-28T08:17:32
2019-01-19T02:57:09
Python
UTF-8
Python
false
false
600
py
# Copyright 2022 Yan Yan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .conv_relu import *
[ "yanyan.sub@outlook.com" ]
yanyan.sub@outlook.com
fdb7273a878ad878f05d2831e74b42e5a755b245
1ad3234576f549f1fbebf3db26b34dfbef46e8e1
/run_seq2seq.py
c23700e4d4c85e8322bb94d8f73028c81415ad39
[ "MIT" ]
permissive
siyuzhou/GNN
9a55b1c8bf29a191483482831cbd01ac4ae4b307
9254a811851c067c1f123e45f52a713090c9ad5d
refs/heads/master
2020-03-09T09:36:04.692059
2019-09-23T18:38:45
2019-09-23T18:38:45
128,716,477
0
1
null
null
null
null
UTF-8
Python
false
false
8656
py
import os
import argparse
import json

import tensorflow as tf
import numpy as np

import gnn
from gnn.modules import mlp_layers
from gnn.data import load_data


def seq2seq(time_segs, params, pred_steps, training=False):
    # timeseries shape [num_sims, time_seq_len, num_agents, ndims]
    num_segs, time_seg_len, num_agents, ndims = time_segs.shape.as_list()
    time_segs = tf.reshape(time_segs, [-1, time_seg_len, num_agents * ndims])

    # Shape [pred_steps, time_seg_len, num_agents * ndims]
    pred_seqs = tf.TensorArray(tf.float32, pred_steps)

    with tf.variable_scope('RNN_Encoder'):
        encoder_lstm_cell = tf.nn.rnn_cell.LSTMCell(
            params['units'], dtype=tf.float32, name='encoder_lstm')
        init_state = encoder_lstm_cell.zero_state(tf.shape(time_segs)[0], dtype=tf.float32)

        _, hidden_state = tf.nn.dynamic_rnn(encoder_lstm_cell, time_segs[:, :-1, :],
                                            initial_state=init_state, dtype=tf.float32)

    with tf.variable_scope('prediction_one_step') as decoder_scope:
        decoder_lstm_cell = tf.nn.rnn_cell.LSTMCell(
            params['units'], name='decoder_lstm', dtype=tf.float32)

        prev_state = time_segs[:, -1, :]

        def one_step(i, prev_state, rnn_state, time_series_stack):
            with tf.name_scope(decoder_scope.original_name_scope):
                output, rnn_state = decoder_lstm_cell(prev_state, rnn_state)
                pred = tf.layers.dense(output, num_agents * ndims, name='linear')
                next_state = prev_state + pred

                time_series_stack = time_series_stack.write(i, next_state)

                return i + 1, next_state, rnn_state, time_series_stack

        i = 0
        _, _, _, pred_seqs = tf.while_loop(
            lambda i, p, t, s: i < pred_steps,
            one_step,
            [i, prev_state, hidden_state, pred_seqs]
        )

    pred_seqs = tf.transpose(pred_seqs.stack(), [1, 0, 2])
    pred_seqs = tf.reshape(pred_seqs, [-1, pred_steps, num_agents, ndims])
    return pred_seqs


def model_fn(features, labels, mode, params):
    pred_seqs = seq2seq(features['time_series'], params, params['pred_steps'],
                        training=(mode == tf.estimator.ModeKeys.TRAIN))

    predictions = {'next_steps': pred_seqs}

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    loss = tf.losses.mean_squared_error(labels, pred_seqs)

    if mode == tf.estimator.ModeKeys.TRAIN:
        learning_rate = tf.train.exponential_decay(
            learning_rate=params['learning_rate'],
            global_step=tf.train.get_global_step(),
            decay_steps=1000,
            decay_rate=0.99,
            staircase=True,
            name='learning_rate'
        )
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())

        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Use the loss between adjacent steps in original time_series as baseline
    time_series_loss_baseline = tf.metrics.mean_squared_error(
        features['time_series'][:, 1:, :, :],
        features['time_series'][:, :-1, :, :])

    eval_metric_ops = {'time_series_loss_baseline': time_series_loss_baseline}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)


def input_fn(features, seg_len, pred_steps, batch_size, mode='train'):
    time_series = features['time_series']
    num_sims, time_steps, num_agents, ndims = time_series.shape
    # Shape [num_sims, time_steps, num_agents, ndims]
    time_series_stack = gnn.utils.stack_time_series(time_series[:, :-pred_steps, :, :], seg_len)
    # Shape [num_sims, time_steps-seg_len-pred_steps+1, seg_len, num_agents, ndims]
    expected_time_series_stack = gnn.utils.stack_time_series(time_series[:, seg_len:, :, :], pred_steps)
    # Shape [num_sims, time_steps-seg_len-pred_steps+1, pred_steps, num_agents, ndims]
    assert time_series_stack.shape[:2] == expected_time_series_stack.shape[:2]

    time_segs = time_series_stack.reshape([-1, seg_len, num_agents, ndims])
    expected_time_segs = expected_time_series_stack.reshape([-1, pred_steps, num_agents, ndims])

    processed_features = {'time_series': time_segs}
    labels = expected_time_segs

    if mode == 'train':
        return tf.estimator.inputs.numpy_input_fn(
            x=processed_features,
            y=labels,
            batch_size=batch_size,
            num_epochs=None,
            shuffle=True
        )
    elif mode == 'eval':
        return tf.estimator.inputs.numpy_input_fn(
            x=processed_features,
            y=labels,
            batch_size=batch_size,
            shuffle=False
        )
    elif mode == 'test':
        return tf.estimator.inputs.numpy_input_fn(
            x=processed_features,
            batch_size=batch_size,
            shuffle=False
        )


def main():
    with open(ARGS.config) as f:
        model_params = json.load(f)

    seg_len = model_params['seg_len']
    model_params['pred_steps'] = ARGS.pred_steps

    seq2seq_regressor = tf.estimator.Estimator(
        model_fn=model_fn,
        params=model_params,
        model_dir=ARGS.log_dir)

    if ARGS.train:
        train_data = load_data(ARGS.data_dir, ARGS.data_transpose, edge=False, prefix='train')
        features = {'time_series': train_data}

        train_input_fn = input_fn(features, seg_len, ARGS.pred_steps, ARGS.batch_size, 'train')

        seq2seq_regressor.train(input_fn=train_input_fn, steps=ARGS.train_steps)

    # Evaluation
    if ARGS.eval:
        valid_data = load_data(ARGS.data_dir, ARGS.data_transpose, edge=False, prefix='valid')
        features = {'time_series': valid_data}

        eval_input_fn = input_fn(features, seg_len, ARGS.pred_steps, ARGS.batch_size, 'eval')

        eval_results = seq2seq_regressor.evaluate(input_fn=eval_input_fn)
        if not ARGS.verbose:
            print('Evaluation results: {}'.format(eval_results))

    # Prediction
    if ARGS.test:
        test_data = load_data(ARGS.data_dir, ARGS.data_transpose, edge=False, prefix='test')
        features = {'time_series': test_data}

        predict_input_fn = input_fn(features, seg_len, ARGS.pred_steps, ARGS.batch_size, 'test')

        prediction = seq2seq_regressor.predict(input_fn=predict_input_fn)
        prediction = np.array([pred['next_steps'] for pred in prediction])
        np.save(os.path.join(ARGS.log_dir, 'prediction_{}.npy'.format(
            ARGS.pred_steps)), prediction)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', type=str,
                        help='data directory')
    parser.add_argument('--data-transpose', type=int, nargs=4, default=None,
                        help='axes for data transposition')
    parser.add_argument('--config', type=str,
                        help='model config file')
    parser.add_argument('--log-dir', type=str,
                        help='log directory')
    parser.add_argument('--train-steps', type=int, default=1000,
                        help='number of training steps')
    parser.add_argument('--pred-steps', type=int, default=1,
                        help='number of steps the estimator predicts for time series')
    parser.add_argument('--batch-size', type=int, default=128,
                        help='batch size')
    parser.add_argument('--verbose', action='store_true', default=False,
                        help='turn on logging info')
    parser.add_argument('--train', action='store_true', default=False,
                        help='turn on training')
    parser.add_argument('--eval', action='store_true', default=False,
                        help='turn on evaluation')
    parser.add_argument('--test', action='store_true', default=False,
                        help='turn on test')
    ARGS = parser.parse_args()

    ARGS.data_dir = os.path.expanduser(ARGS.data_dir)
    ARGS.config = os.path.expanduser(ARGS.config)
    ARGS.log_dir = os.path.expanduser(ARGS.log_dir)

    if ARGS.verbose:
        tf.logging.set_verbosity(tf.logging.INFO)

    main()
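# --- Hedged invocation sketch (not part of the original file) ---
# Paths and config contents are illustrative only; the JSON config must
# provide at least "seg_len", "units" and "learning_rate" for model_fn above.
#
#   python run_seq2seq.py --data-dir ~/data/springs \
#       --config ~/configs/seq2seq.json --log-dir ~/logs/seq2seq \
#       --train --train-steps 5000 --pred-steps 10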
[ "szhou@cradle.dhcp.asu.edu" ]
szhou@cradle.dhcp.asu.edu
20c8ad99c8919ac3a6c8acd8e41a0cf8cd13242f
52317902597ea3e2ae7fcb18ec4f1404f4925bde
/Code/C7_2_twoSample.py
0ce31f0784767d41fa86563d3e8a8d8db8033785
[]
no_license
akansal1/statsintro_python
3cb7a1a3878a59c0552f3d3ad3f2e83fc36b3203
0660a0fffcfac11abfe7e6e8c19f1ba0c25e0d36
refs/heads/master
2021-01-22T00:59:23.195619
2015-08-12T12:12:44
2015-08-12T12:12:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
3377
py
''' Comparison of two groups
- Analysis of paired data
- Analysis of unpaired data
'''

# author: Thomas Haslwanter, date: July-2013

# Import standard packages
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats

# additional packages
from C2_8_getdata import getData


def paired_data():
    '''Analysis of paired data
    Compare mean daily intake over 10 pre-menstrual and 10 post-menstrual days (in kJ).'''

    # Get the data: daily intake of energy in kJ for 11 women
    data = getData('altman_93.txt', subDir=r'..\Data\data_altman')

    np.mean(data, axis=0)
    np.std(data, axis=0, ddof=1)

    pre = data[:, 0]
    post = data[:, 1]

    # --- >>> START stats <<< ---
    # paired t-test: doing two measurements on the same experimental unit
    # e.g., before and after a treatment
    t_statistic, p_value = stats.ttest_1samp(post - pre, 0)

    # p < 0.05 => alternative hypothesis:
    # the difference in mean is not equal to 0
    print(("paired t-test", p_value))

    # alternative to the paired t-test when the data have only an ordinal
    # scale or are not normally distributed
    rankSum, p_value = stats.wilcoxon(post - pre)
    # --- >>> STOP stats <<< ---
    print(("Wilcoxon-Signed-Rank-Sum test", p_value))

    return p_value  # should be 0.0033300139117459797


def unpaired_data():
    ''' Then some unpaired comparison: 24 hour total energy
    expenditure (MJ/day), in groups of lean and obese women'''

    # Get the data: energy expenditure in mJ and stature (0=obese, 1=lean)
    energ = getData('altman_94.txt', subDir=r'..\Data\data_altman')

    # Group them
    group1 = energ[:, 1] == 0
    group1 = energ[group1][:, 0]
    group2 = energ[:, 1] == 1
    group2 = energ[group2][:, 0]

    np.mean(group1)
    np.mean(group2)

    # --- >>> START stats <<< ---
    # two-sample t-test
    # null hypothesis: the two groups have the same mean
    # this test assumes the two groups have the same variance...
    # (can be checked with tests for equal variance)
    # independent groups: e.g., how boys and girls fare at an exam
    # dependent groups: e.g., how the same class fare at 2 different exams
    t_statistic, p_value = stats.ttest_ind(group1, group2)

    # p_value < 0.05 => alternative hypothesis:
    # they don't have the same mean at the 5% significance level
    print(("two-sample t-test", p_value))

    # For non-normally distributed data, perform the two-sample Wilcoxon test,
    # a.k.a. Mann-Whitney U
    u, p_value = stats.mannwhitneyu(group1, group2)
    print(("Mann-Whitney test", p_value))
    # --- >>> STOP stats <<< ---

    # Plot the data
    plt.plot(group1, 'bx', label='obese')
    plt.hold(True)  # note: removed in matplotlib 3.0, where successive plot calls hold by default
    plt.plot(group2, 'ro', label='lean')
    plt.legend(loc=0)
    plt.show()

    # The same calculations, but implemented with pandas, would be:
    #import pandas as pd
    #df = pd.DataFrame(energ, columns = ['energy', 'weightClass'])
    #grouped = df.groupby('weightClass')
    #grouped.mean()
    #t_statistic, p_value = stats.ttest_ind(grouped.get_group(0).energy, grouped.get_group(1).energy)
    #grouped.energy.plot(marker='o', lw=0)
    #plt.legend(['obese', 'lean'])
    #plt.show()

    return p_value  # should be 0.0010608066929400244


if __name__ == '__main__':
    paired_data()
    unpaired_data()
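# --- Hedged worked example (not part of the original file) ---
# The same paired test on made-up numbers, to show the call pattern:
#
#   pre = np.array([10., 12., 9., 11.])
#   post = np.array([8., 11., 7., 10.])
#   t, p = stats.ttest_1samp(post - pre, 0)  # paired test = one-sample test on the differences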
[ "thomas.haslwanter@alumni.ethz.ch" ]
thomas.haslwanter@alumni.ethz.ch
a46b63796813396cf5f27b0c2a94d8e2a901303f
b466a62a6b8151937212688c09b3a5704eaa7466
/Polymorphism and Abstraction - Exercise/Account.py
961264831a48307859fb29848be71048b5c377c3
[ "MIT" ]
permissive
DiyanKalaydzhiev23/OOP---Python
89efa1a08056375496278dac3af97e10876f7728
7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0
refs/heads/main
2023-07-08T08:23:05.148293
2021-08-13T12:09:12
2021-08-13T12:09:12
383,723,287
2
0
null
null
null
null
UTF-8
Python
false
false
2158
py
class Account:
    def __init__(self, owner, amount=0):
        self.owner = owner
        self.amount = amount
        self._transactions = []

    def add_transaction(self, amount):
        if not isinstance(amount, int):
            raise ValueError("please use int for amount")
        self._transactions.append(amount)

    @property
    def balance(self):
        return self.amount + sum(self._transactions)

    @staticmethod
    def validate_transaction(account, amount_to_add):
        if account.balance + amount_to_add < 0:
            raise ValueError("sorry cannot go in debt!")
        account.add_transaction(amount_to_add)
        return f"New balance: {account.balance}"

    def __repr__(self):
        return f"Account({self.owner}, {self.amount})"

    def __str__(self):
        return f"Account of {self.owner} with starting amount: {self.amount}"

    def __len__(self):
        return len(self._transactions)

    def __getitem__(self, item):
        return self._transactions[item]

    def __gt__(self, other):
        return self.balance > other.balance

    def __ge__(self, other):
        return self.balance >= other.balance

    def __lt__(self, other):
        return self.balance < other.balance

    def __le__(self, other):
        return self.balance <= other.balance

    def __eq__(self, other):
        return self.balance == other.balance

    def __ne__(self, other):
        return self.balance != other.balance

    def __add__(self, other):
        result = Account(f"{self.owner}&{other.owner}", self.amount + other.amount)
        result._transactions = self._transactions + other._transactions
        return result


acc = Account('bob', 10)
acc2 = Account('john')
print(acc)
print(repr(acc))

acc.add_transaction(20)
acc.add_transaction(-20)
acc.add_transaction(30)

print(acc.balance)
print(len(acc))

for transaction in acc:
    print(transaction)

print(acc[1])
print(list(reversed(acc)))

acc2.add_transaction(10)
acc2.add_transaction(60)

print(acc > acc2)
print(acc >= acc2)
print(acc < acc2)
print(acc <= acc2)
print(acc == acc2)
print(acc != acc2)

acc3 = acc + acc2
print(acc3)
print(acc3._transactions)
[ "diankostadenov@gmail.com" ]
diankostadenov@gmail.com
efa63bf4d2e5243df5f84326198b1985a4e77a50
79a8789ce32ffc204e1a427f9a67102e70d90705
/utils/data.py
e53e8252225aed2027ed9fb00538e61b00c799d1
[]
no_license
jhunufernandes/kabum-flask
d4f21ef01e1ba59f7f69acd1b92a6219789d5cf1
f78046018d2f769405f05280b3a712459060774a
refs/heads/master
2022-12-22T05:14:01.372455
2020-09-24T10:08:59
2020-09-24T10:08:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
523
py
import datetime
import json

import requests
from flask import jsonify

products = [
    '85197',
    '79936',
    '87400'
]

list_products = []
for p in products:
    r = requests.get(
        f'https://servicespub.prod.api.aws.grupokabum.com.br/descricao/v1/descricao/produto/{p}')
    product = json.loads(r.content)
    list_products.append(product)


def response_header(data):
    return jsonify({
        "date": datetime.datetime.now(),
        "quantity_total": len(list_products),
        "results": data
    })
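# --- Hedged usage sketch (not part of the original file) ---
# jsonify needs a Flask application context, so response_header would
# typically be called from a route; `app` and the route name are assumptions.
#
# from flask import Flask
# app = Flask(__name__)
#
# @app.route('/products')
# def products_view():
#     return response_header(list_products)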
[ "root@impulcetto.localdomain" ]
root@impulcetto.localdomain
5b62c5a0e9de5abd365e6aa5db2e30b54f706c5c
6071739a1892d590e1c644a562f1a5eaf2e977a0
/build.py
61ad2daaa901f3ab9247aebb7cca6510d9c9aad2
[]
no_license
exit99/uni_rankings
b744edafc4da98845ea2521ea0a04cac61fd915a
41d88e92f794a06209b178b1fdc7cbaf8bb10f3e
refs/heads/master
2021-06-17T10:45:47.794385
2017-05-01T20:57:01
2017-05-01T20:57:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
3484
py
import csv
import json
import ntpath
import os
import re

import xlrd
from cubes import Workspace
from helper import create_table_from_csv
from sqlalchemy import create_engine


def build(db):
    datadir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
    files = [os.path.join("data", f) for f in os.listdir(datadir)]
    build_models(files)
    for f in files:
        build_table(f, db)


def build_table(f, db):
    sh = get_sheet(f)
    csv_name = "_temp_file.csv"
    your_csv_file = open(csv_name, 'w')
    wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
    for rownum in range(sh.nrows):
        wr.writerow(sh.row_values(rownum))
    your_csv_file.close()

    engine = create_engine(db)
    cols, types = get_columns(f)
    cubes = [cube_name(col) for col in cols]
    mapper = {str: "string", float: "float", bool: "boolean"}
    fields = [(col, mapper[type_]) for col, type_ in zip(cols, types)]
    for cube in cubes:
        print(cube)
        create_table_from_csv(
            engine,
            csv_name,
            table_name=cube,
            fields=fields,
            create_id=True)
    os.remove(csv_name)


def build_models(files):
    model = {}
    for f in files:
        model.update(build_model(f))
    with open("model.json", "w") as f:
        f.write(json.dumps(model, indent=2, sort_keys=True))


def build_model(f):
    cols, types = get_columns(f)
    return {
        "cubes": [build_cube(f, type_, col, cols) for col, type_ in zip(cols, types)],
        "dimensions": build_dimensions(f, cols),
    }


def get_columns(f):
    worksheet = get_sheet(f)
    cols = [worksheet.cell_value(0, col_num)
            for col_num in range(0, worksheet.ncols)]
    types = [get_type(worksheet, col_num)
             for col_num in range(0, worksheet.ncols)]
    return cols, types


def get_type(worksheet, column):
    types = {1: str, 2: float, 4: bool}
    for row in range(1, 100):
        type_ = types.get(worksheet.cell_type(row, column))
        if type_:
            return type_


def build_cube(f, col_type, col_name, all_cols):
    type_args = {
        bool: ["count"],
        float: ["count", "avg", "max", "min"],
        str: ["count"],
    }
    return {
        "name": cube_name(col_name),
        "label": col_name,
        "dimensions": [dimension_name(f, col) for col in all_cols],
        "measures": [{"name": col_name, "label": col_name}],
        "aggregates": build_aggregates(col_name, type_args[col_type]),
    }


def build_aggregates(column, args):
    return [{
        "name": "{}_{}".format(column, arg),
        "function": arg,
        "measure": column,
        "label": arg,
    } for arg in args]


def build_dimensions(f, all_cols):
    return [{
        "name": dimension_name(f, col),
        "levels": [build_level(col)],
        "label": col,
    } for col in all_cols]


def build_level(col):
    return {
        "name": level_name(col),
        "label": col,
        "attributes": [col]
    }


def cube_name(col):
    return sanitize_name("{}_cube".format(col))


def dimension_name(f, col):
    fbase = ntpath.basename(f).split(".")[0]
    return sanitize_name("{}_{}_dimension".format(fbase, col))


def level_name(level):
    return sanitize_name("{}_level".format(level))


def get_sheet(f):
    wb = xlrd.open_workbook(f)
    return wb.sheet_by_name(wb.sheet_names()[0])


def sanitize_name(name):
    name = re.sub('[^0-9a-zA-Z]+', '_', name)
    return name.replace(' ', '_')


if __name__ == "__main__":
    build("sqlite:///data.sqlite")
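# --- Worked example (not part of the original file) ---
# How the naming helpers behave:
#   sanitize_name("GDP per capita (US$)")  -> "GDP_per_capita_US_"
#   cube_name("Overall Score")             -> "Overall_Score_cube"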
[ "kazanski.zachary@gmail.com" ]
kazanski.zachary@gmail.com
2415ec820147927b2e048a770112efeec19fe1a3
59ee0e3756f574cb8f08fc0b99d3ac4d463f1af4
/mqttPahoTest.py
900bd2f8061004240822791f712a00c9ed24c6dc
[]
no_license
anyfini/MQTT-Test
47d6f3915cd41ae20cb036fba130d487f492294e
6d4a79ce5a56b41a40a1e9c3b8f5829ed34ce82c
refs/heads/main
2023-07-18T09:08:33.331261
2021-08-20T09:01:32
2021-08-20T09:01:32
387,198,176
0
0
null
null
null
null
UTF-8
Python
false
false
1477
py
import paho.mqtt.client as mqtt
import time


def on_log(client, userdata, level, buf):
    print("log: " + buf)


def on_connect(client, userdata, flags, rc):
    if rc == 0:
        print("connected OK")
    else:
        print("Bad connection Returned code =", rc)


def on_disconnect(client, userdata, rc):
    if rc != 0:
        print("Unexpected disconnection(Sub)!")


def on_message(client, userdata, msg):
    topic = msg.topic
    m_decode = str(msg.payload.decode("utf-8", "ignore"))
    print("message received :", m_decode)
    # print("message received", msg.payload)  # if used like this, the message is still printed with surrounding brackets etc.
    client.disconnect()


def on_publish(client, userdata, mid):
    print("Publisher send message")


### BROKER ###
# broker = "127.0.0.1"            # Localhost
# broker = "test.mosquitto.org"   # Online Broker
broker = "169.254.173.239"        # Raspi Broker LAN
# broker = "192.168.0.16"         # Raspi via Wlan

client = mqtt.Client("python1")

client.on_connect = on_connect
# client.on_log = on_log  # actually this is usually left out
client.on_disconnect = on_disconnect
client.on_message = on_message
client.on_publish = on_publish

print("Connecting to broker", broker)
client.connect(broker)

client.loop_start()

client.subscribe("MQTT/Test1")
time.sleep(1)
client.publish("MQTT/Test1", "my first message")
time.sleep(5)

client.loop_stop()
client.disconnect()
[ "38047116+anyfini@users.noreply.github.com" ]
38047116+anyfini@users.noreply.github.com
429c3366e4476dfc70f52fc6a8d63ee4d525913a
36349866405d0567f5ce1c73c7350d77807b5a31
/dp_primitive_calculator.py
4af550a27693576863bb0b6f56b76b0a3123a50c
[]
no_license
kajarenc/stepicAlgo2015
6397e43e145e7631711be583bc27a7d0d9a2c937
502b397387be303f666cd08c0028887b92a00880
refs/heads/master
2021-01-10T09:37:37.900645
2015-12-09T00:34:08
2015-12-09T00:34:08
45,359,382
0
0
null
null
null
null
UTF-8
Python
false
false
586
py
n = int(input())

dp = [0, 0]
for i in range(2, n + 1):
    doubling = 100000000
    trebling = 100000000
    if i % 2 == 0:
        doubling = dp[i // 2]
    if i % 3 == 0:
        trebling = dp[i // 3]
    increment = dp[i - 1]
    dp.append(min(increment, doubling, trebling) + 1)

print(dp[-1])

res = []
j = n
while j != 1:
    res.append(j)
    if dp[j] == dp[j - 1] + 1:
        j -= 1
    elif j % 2 == 0 and dp[j] == dp[j // 2] + 1:
        j //= 2
    elif j % 3 == 0 and dp[j] == dp[j // 3] + 1:
        j //= 3

res.append(1)

for elem in reversed(res):
    print(elem, end=" ")
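# --- Worked example (not part of the original file) ---
# Input 5: dp grows to [0, 0, 1, 1, 2, 3], so 3 operations are printed,
# and the backtracking recovers the sequence: 1 3 4 5 (i.e. x3, +1, +1).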
[ "kajarenc@gmail.com" ]
kajarenc@gmail.com