Commit
Merge branch 'main' of https://github.com/Kaiyotech/Opti
Kaiyotech committed May 9, 2023
2 parents 704cf64 + 63b2379 commit d012d6e
Showing 5 changed files with 38 additions and 7 deletions.
27 changes: 26 additions & 1 deletion CoyoteObs.py
@@ -17,7 +17,7 @@
from gym import Space
from gym.spaces import Tuple, Box

from rlgym.utils.common_values import BOOST_LOCATIONS
from rlgym.utils.common_values import BOOST_LOCATIONS, BALL_RADIUS

from numba import njit

@@ -76,8 +76,14 @@ def __init__(self, tick_skip=8, team_size=3, expanding: bool = True, extra_boost
end_object: PhysicsObject = None,
mask_aerial_opp=False,
selector_infinite_boost=None,
doubletap_indicator=False,
):
super().__init__()
self.doubletap_indicator = doubletap_indicator
if self.doubletap_indicator:
self.floor_bounce = False
self.backboard_bounce = False
self.prev_ball_vel = np.asarray([0] * 3)
assert not (selector_infinite_boost is not None and not selector)
self.n = 0
self.selector_infinite_boost = selector_infinite_boost
@@ -225,6 +231,11 @@ def reset(self, initial_state: GameState):
if self.add_handbrake:
self.handbrakes = np.zeros(len(initial_state.players))

if self.doubletap_indicator:
self.floor_bounce = False
self.backboard_bounce = False
self.prev_ball_vel = np.array(initial_state.ball.linear_velocity)

def pre_step(self, state: GameState):
# dist = state.ball.position - state.players[0].car_data.position
# dist_norm = np.linalg.norm(dist)
@@ -253,6 +264,17 @@ def pre_step(self, state: GameState):
state.ball.linear_velocity = np.asarray([0, 0, 0])
state.ball.angular_velocity = np.asarray([0, 0, 0])

# for double tap
if self.doubletap_indicator:
if state.ball.position[2] < BALL_RADIUS * 2 and 0.55 * self.prev_ball_vel[2] \
< state.ball.linear_velocity[2] > 0.65 * self.prev_ball_vel[2]:
self.floor_bounce = True
elif 0.55 * self.prev_ball_vel[1] < state.ball.linear_velocity[1] > 0.65 * \
self.prev_ball_vel[1] and \
abs(state.ball.position[1]) > 4900 and state.ball.position[2] > 500:
self.backboard_bounce = True
self.prev_ball_vel = np.array(state.ball.linear_velocity)

def _update_timers(self, state: GameState):
current_boosts = state.boost_pads
boost_locs = self.boost_locations
@@ -714,6 +736,9 @@ def add_players_to_obs(self, obs: List, state: GameState, player: PlayerData, ba
ball.position, ball.linear_velocity, prev_act
)

if self.doubletap_indicator:
player_data.extend(list([int(self.backboard_bounce), int(self.floor_bounce)]))

if self.stack_size != 0:
if self.selector:
self.model_add_action_to_stack(
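Note on the CoyoteObs.py change: the new doubletap_indicator flag tracks whether the ball has bounced off the floor or off a backboard since the last reset, and appends those two flags as binary features to each player's observation. A minimal standalone sketch of the idea follows; the class name, the ball-radius constant, and the sign-flip checks are illustrative assumptions, not the repo's exact thresholds (the committed code instead compares each velocity component against 0.55 and 0.65 times its previous value).

```python
import numpy as np

BALL_RADIUS = 92.75  # approximate Rocket League ball radius in unreal units


class BounceTracker:
    """Illustrative tracker for floor/backboard bounces between ticks."""

    def __init__(self):
        self.floor_bounce = False
        self.backboard_bounce = False
        self.prev_vel = np.zeros(3)

    def update(self, ball_pos, ball_vel):
        # Floor bounce: ball is near the ground and its vertical velocity
        # flipped from downward to upward since the previous tick.
        if ball_pos[2] < BALL_RADIUS * 2 and ball_vel[2] > 0 > self.prev_vel[2]:
            self.floor_bounce = True
        # Backboard bounce: ball is deep and high near either goal wall and
        # its y-velocity changed sign since the previous tick.
        elif abs(ball_pos[1]) > 4900 and ball_pos[2] > 500 and \
                np.sign(ball_vel[1]) != np.sign(self.prev_vel[1]):
            self.backboard_bounce = True
        self.prev_vel = np.array(ball_vel)

    def features(self):
        # The two binary features appended to each player's observation.
        return [int(self.backboard_bounce), int(self.floor_bounce)]
```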
2 changes: 2 additions & 0 deletions learner_demo.py
@@ -88,6 +88,8 @@
velocity_po_w=0.001,
vel_po_mult_ss=5,
vel_po_mult_neg=0.01,
boost_gain_w=0.5,
boost_spend_w=-0.3,
tick_skip=Constants_demo.FRAME_SKIP,
),
lambda: CoyoteAction(),
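Note on the learner_demo.py change: two reward weights are added which, by their names, reward gaining boost (boost_gain_w=0.5) and penalize spending it (boost_spend_w=-0.3). A hedged sketch of how such a term might be computed from the change in a player's boost amount; the function and its use here are assumptions, not ZeroSumReward's actual implementation.

```python
def boost_reward(prev_boost, cur_boost, boost_gain_w=0.5, boost_spend_w=-0.3):
    # Boost amounts are assumed normalized to [0, 1].
    delta = cur_boost - prev_boost
    if delta > 0:
        return boost_gain_w * delta          # picked up boost
    return boost_spend_w * -delta            # spent boost (negative weight -> penalty)
```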
9 changes: 5 additions & 4 deletions learner_dtap.py
@@ -51,10 +51,10 @@
ent_coef=0.01,
)

run_id = "dtap_run1.03"
run_id = "dtap_run2.00"
wandb.login(key=os.environ["WANDB_KEY"])
logger = wandb.init(dir="./wandb_store",
name="Dtap_Run1.03",
name="Dtap_Run2.00",
project="Opti",
entity="kaiyotech",
id=run_id,
@@ -82,6 +82,7 @@
add_fliptime=True,
add_boosttime=True,
add_handbrake=True,
doubletap_indicator=True,
),
lambda: ZeroSumReward(zero_sum=Constants_dtap.ZERO_SUM,
concede_w=-10,
@@ -104,11 +105,11 @@
max_age=1,
)

critic = Sequential(Linear(229, 256), LeakyReLU(), Linear(256, 128), LeakyReLU(),
critic = Sequential(Linear(231, 256), LeakyReLU(), Linear(256, 128), LeakyReLU(),
Linear(128, 128), LeakyReLU(),
Linear(128, 1))

actor = Sequential(Linear(229, 96), LeakyReLU(), Linear(96, 96), LeakyReLU(),
actor = Sequential(Linear(231, 96), LeakyReLU(), Linear(96, 96), LeakyReLU(),
Linear(96, 96), LeakyReLU(),
Linear(96, 373))

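Note on the learner_dtap.py change: the run moves from dtap_run1.03 to dtap_run2.00, doubletap_indicator is enabled in the observation builder, and the actor/critic input width grows from 229 to 231 to match the two extra indicator features. A quick consistency sketch, with the base size of 229 taken from the diff:

```python
# The network input width must equal the flattened observation length;
# enabling doubletap_indicator adds 2 features, hence 229 -> 231.
BASE_OBS_SIZE = 229          # previous dtap observation length
DOUBLETAP_FEATURES = 2       # backboard_bounce + floor_bounce indicators
obs_size = BASE_OBS_SIZE + DOUBLETAP_FEATURES
assert obs_size == 231
```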
6 changes: 4 additions & 2 deletions worker_demo.py
@@ -27,6 +27,8 @@
velocity_po_w=0.001,
vel_po_mult_ss=5,
vel_po_mult_neg=0.01,
boost_gain_w=0.5,
boost_spend_w=-0.3,
tick_skip=Constants_demo.FRAME_SKIP,
)
frame_skip = Constants_demo.FRAME_SKIP
@@ -49,7 +51,7 @@
batch_mode = True
team_size = 3
dynamic_game = True
infinite_boost_odds = 0
infinite_boost_odds = 0.2
host = "127.0.0.1"
epic_rl_exe_path = None # "D:/Program Files/Epic Games/rocketleague_old/Binaries/Win64/RocketLeague.exe"

@@ -75,7 +77,7 @@
evaluation_prob = 0
game_speed = 1
auto_minimize = False
infinite_boost_odds = 0
# infinite_boost_odds = 0.2
simulator = False
past_version_prob = 0

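Note on the worker_demo.py change: the demo worker picks up the same new boost reward weights, and infinite_boost_odds is raised from 0 to 0.2, with the later re-assignment commented out so it no longer overrides the value. A tiny sketch of how such odds are typically used to decide a match's boost mode; this is an assumed usage, the actual mechanism lives elsewhere in the repo.

```python
import random

infinite_boost_odds = 0.2
# Roughly one in five matches would be played with infinite boost enabled.
infinite_boost = random.random() < infinite_boost_odds
```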
1 change: 1 addition & 0 deletions worker_dtap.py
@@ -135,6 +135,7 @@
add_fliptime=True,
add_boosttime=True,
add_handbrake=True,
doubletap_indicator=True,
),
action_parser=CoyoteAction(),
terminal_conditions=[GoalScoredCondition(),
