# multiAgents.py
# --------------
# Licensing Information:  You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
# 
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).


from util import manhattanDistance
from game import Directions
import random, util

from game import Agent
from pacman import GameState

class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.

    The code below is provided as a guide.  You are welcome to change
    it in any way you see fit, so long as you don't touch our method
    headers.
    """


    def getAction(self, gameState: GameState):
        """
        You do not need to change this method, but you're welcome to.

        getAction chooses among the best options according to the evaluation function.

        Just like in the previous project, getAction takes a GameState and returns
        some Directions.X for some X in the set {NORTH, SOUTH, WEST, EAST, STOP}
        """
        # Collect legal moves and successor states
        legalMoves = gameState.getLegalActions()

        # Choose one of the best actions
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
        bestScore = max(scores)
        bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
        chosenIndex = random.choice(bestIndices) # Pick randomly among the best

        "Add more of your code here if you want to"

        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState: GameState, action):
        """
        Design a better evaluation function here.

        The evaluation function takes in the current and proposed successor
        GameStates (pacman.py) and returns a number, where higher numbers are better.

        The code below extracts some useful information from the state, like the
        remaining food (newFood) and Pacman position after moving (newPos).
        newScaredTimes holds the number of moves that each ghost will remain
        scared because of Pacman having eaten a power pellet.

        Print out these variables to see what you're getting, then combine them
        to create a masterful evaluation function.
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()       # Pacman's position after moving
        newFood = successorGameState.getFood()                # remaining food as a 2D grid
        newGhostStates = successorGameState.getGhostStates()  # ghost states after moving
        newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
        
        "*** YOUR CODE HERE ***"
        point = 0  # running score for this action
        # Reward scared ghosts: the longer they stay scared, the better.
        for time in newScaredTimes:
            point += time
        # Standing still is almost never useful, so penalize it heavily.
        if action == Directions.STOP:
            return -100
        # Prefer states close to the nearest food (reciprocal distance).
        foodList = newFood.asList()
        foodDistance = 100
        for food in foodList:
            foodDistance = min(foodDistance, manhattanDistance(newPos, food))
        food_point = 1.0 / foodDistance
        # Penalize states close to the nearest ghost.
        ghostDistance = 100
        for ghost in newGhostStates:
            if newPos == ghost.getPosition():
                point -= 100  # stepping onto a ghost is (usually) fatal
            ghostDistance = min(ghostDistance, manhattanDistance(newPos, ghost.getPosition()))
        ghost_point = 100 if ghostDistance == 0 else 1.0 / ghostDistance
        point += food_point - ghost_point
        point += successorGameState.getScore()
        return point
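
# A minimal, framework-free sketch of the reciprocal-distance feature used
# above, assuming hypothetical (x, y) integer coordinates rather than a real
# GameState. Closer targets yield a larger score; the max() guards against
# dividing by zero when standing on a target.
def _reciprocal_distance_feature(pos, targets):
    """Return 1 / (Manhattan distance to the nearest target), or 0 if none."""
    if not targets:
        return 0.0
    nearest = min(abs(pos[0] - x) + abs(pos[1] - y) for (x, y) in targets)
    return 1.0 / max(nearest, 1)

# Example: _reciprocal_distance_feature((1, 1), [(4, 1), (1, 3)]) == 0.5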

def scoreEvaluationFunction(currentGameState: GameState):
    """
    This default evaluation function just returns the score of the state.
    The score is the same one displayed in the Pacman GUI.

    This evaluation function is meant for use with adversarial search agents
    (not reflex agents).
    """
    return currentGameState.getScore()

class MultiAgentSearchAgent(Agent):
    """
    This class provides some common elements to all of your
    multi-agent searchers.  Any methods defined here will be available
    to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.

    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents.  Please do not
    remove anything, however.

    Note: this is an abstract class: one that should not be instantiated.  It's
    only partially specified, and designed to be extended.  Agent (game.py)
    is another abstract class.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        self.index = 0 # Pacman is always agent index 0
        self.evaluationFunction = util.lookup(evalFn, globals())
        self.depth = int(depth)

class MinimaxAgent(MultiAgentSearchAgent):
    """
    Your minimax agent (question 2)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the minimax action from the current gameState using self.depth
        and self.evaluationFunction.

        Here are some method calls that might be useful when implementing minimax.

        gameState.getLegalActions(agentIndex):
        Returns a list of legal actions for an agent
        agentIndex=0 means Pacman, ghosts are >= 1

        gameState.generateSuccessor(agentIndex, action):
        Returns the successor game state after an agent takes an action

        gameState.getNumAgents():
        Returns the total number of agents in the game

        gameState.isWin():
        Returns whether or not the game state is a winning state

        gameState.isLose():
        Returns whether or not the game state is a losing state
        """
        "*** YOUR CODE HERE ***"
        # Root of the search: evaluate every Pacman action with the ghosts'
        # MIN layers and return the action with the largest minimax value.
        pac_next = gameState.getLegalActions(0)
        Action = None
        point = -float('inf')
        for pac_action in pac_next:
            value = self.min_value(gameState.generateSuccessor(0, pac_action))
            if value > point:
                point = value
                Action = pac_action
        return Action
    
    def max_value(self, gameState, depth=0, agentIndex=0):
        # Value of a MAX node: the best result Pacman can reach from here.
        legal_action = gameState.getLegalActions(agentIndex)
        if depth == self.depth or not legal_action:  # depth limit or terminal state
            return self.evaluationFunction(gameState)
        MAX = -float('inf')
        # Every Pacman action leads to a state where the ghosts move next,
        # so each successor is scored by min_value (starting at agentIndex 1).
        for act in legal_action:
            value = self.min_value(gameState.generateSuccessor(agentIndex, act), depth, 1)
            MAX = max(MAX, value)
        return MAX

    def min_value(self, gameState, depth=0, agentIndex=1):
        # Value of a MIN node: the worst result the ghosts can force from here.
        legal_action = gameState.getLegalActions(agentIndex)
        if depth == self.depth or not legal_action:  # depth limit or terminal state
            return self.evaluationFunction(gameState)
        MIN = float('inf')
        for act in legal_action:
            if agentIndex == gameState.getNumAgents() - 1:
                # Last ghost: one full ply is complete, hand back to Pacman.
                value = self.max_value(gameState.generateSuccessor(agentIndex, act), depth + 1, 0)
            else:
                # Otherwise the next ghost moves within the same ply.
                value = self.min_value(gameState.generateSuccessor(agentIndex, act), depth, agentIndex + 1)
            MIN = min(MIN, value)
        return MIN
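
# A minimal, framework-free sketch of the same minimax recursion, assuming a
# toy game tree encoded as nested lists with numeric leaves (an illustrative
# assumption; MinimaxAgent above operates on real GameStates).
def _toy_minimax(node, maximizing=True):
    if not isinstance(node, list):  # leaf: return its utility
        return node
    values = [_toy_minimax(child, not maximizing) for child in node]
    return max(values) if maximizing else min(values)

# Example: _toy_minimax([[3, 5], [2, 9]]) == 3
# (MAX of MINs: min(3, 5) = 3 beats min(2, 9) = 2)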

class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        # max_value returns a (value, action) pair; index 1 is the action.
        return self.max_value(gameState)[1]
    
    def max_value(self, gameState, depth=0, agentIndex=0, alpha=-float('inf'), beta=float('inf')):
        # Terminal test: depth limit reached or no legal moves left.
        legal_action = gameState.getLegalActions(agentIndex)
        if depth == self.depth or not legal_action:
            return self.evaluationFunction(gameState), None
        # Initialize the best value and action seen so far.
        MAX = -float('inf')
        Action = None
        # Try every legal Pacman action.
        for act in legal_action:
            # Value of the successor, computed by the first ghost's MIN layer.
            next_state = gameState.generateSuccessor(agentIndex, act)
            value = self.min_value(next_state, depth, 1, alpha, beta)[0]
            # Keep the better action.
            if value > MAX:
                MAX, Action = value, act
                # Raise alpha: the value MAX can already guarantee.
                alpha = max(alpha, MAX)
            # Alpha-beta cutoff: MIN above will never allow this branch.
            if MAX > beta:
                break
        return MAX, Action
        
    def min_value(self, gameState, depth=0, agentIndex=0, alpha=-float('inf'), beta=float('inf')):
        # Terminal test: depth limit reached or no legal moves left.
        legal_action = gameState.getLegalActions(agentIndex)
        if depth == self.depth or not legal_action:
            return self.evaluationFunction(gameState), None
        # Initialize the worst value and action seen so far.
        MIN = float('inf')
        Action = None
        # Try every legal move of the current ghost.
        for act in legal_action:
            successor = gameState.generateSuccessor(agentIndex, act)
            if agentIndex < gameState.getNumAgents() - 1:
                # Not the last ghost: the next ghost moves in the same ply.
                value = self.min_value(successor, depth, agentIndex + 1, alpha, beta)[0]
            else:
                # Last ghost: the ply is complete, hand back to Pacman.
                value = self.max_value(successor, depth + 1, 0, alpha, beta)[0]
            # Keep the smaller (worse-for-Pacman) value.
            if value < MIN:
                MIN, Action = value, act
                # Lower beta: the value MIN can already force.
                beta = min(beta, MIN)
            # Alpha-beta cutoff: MAX above will never allow this branch.
            if MIN < alpha:
                break
        return MIN, Action
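
# A framework-free sketch of alpha-beta pruning on the same toy-tree encoding
# (nested lists, numeric leaves). The strict cutoff tests mirror the agent
# above; the tree encoding is an illustrative assumption, not framework code.
def _toy_alphabeta(node, maximizing=True, alpha=-float('inf'), beta=float('inf')):
    if not isinstance(node, list):  # leaf: return its utility
        return node
    if maximizing:
        best = -float('inf')
        for child in node:
            best = max(best, _toy_alphabeta(child, False, alpha, beta))
            alpha = max(alpha, best)
            if best > beta:  # MIN above would never allow this branch
                break
        return best
    best = float('inf')
    for child in node:
        best = min(best, _toy_alphabeta(child, True, alpha, beta))
        beta = min(beta, best)
        if best < alpha:  # MAX above would never allow this branch
            break
    return best

# Example: _toy_alphabeta([[3, 5], [2, 9]]) == 3; the leaf 9 is pruned,
# because min(2, ...) can never beat the 3 already guaranteed to MAX.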
    

class ExpectimaxAgent(MultiAgentSearchAgent):
    """
      Your expectimax agent (question 4)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction

        All ghosts should be modeled as choosing uniformly at random from their
        legal moves.
        """
        "*** YOUR CODE HERE ***"
        # At the root (depth 0, Pacman) max_value returns the chosen action
        # itself rather than a value; see the root check inside max_value.
        return self.max_value(gameState)

    def max_value(self, gameState, depth=0, agentIndex=0):
        # All of Pacman's legal next actions.
        legal_action = gameState.getLegalActions(agentIndex)
        # Terminal test: depth limit reached or no legal moves left.
        if depth == self.depth or not legal_action:
            return self.evaluationFunction(gameState)
        # Initialize the best value and action seen so far.
        MAX = None
        Action = None
        for act in legal_action:
            # Expectimax over the ghosts, starting with the first ghost.
            value = self.Expectation(gameState.generateSuccessor(agentIndex, act), depth, 1)
            if MAX is None or value > MAX:
                MAX = value
                Action = act
        if depth == 0 and agentIndex == 0:
            return Action  # at the root, return the chosen action itself
        else:
            return MAX

    def Expectation(self, gameState, depth=0, agentIndex=0):
        legal_action = gameState.getLegalActions(agentIndex)
        # Terminal test: depth limit reached or no legal moves left.
        if depth == self.depth or not legal_action:
            return self.evaluationFunction(gameState)
        # Accumulate the total utility over all legal moves.
        effect = 0
        action_num = len(legal_action)
        # Loop over every legal move of the current ghost.
        for act in legal_action:
            if agentIndex < gameState.getNumAgents() - 1:
                # Not the last ghost: recurse into the next ghost's chance node.
                effect += self.Expectation(gameState.generateSuccessor(agentIndex, act), depth, agentIndex + 1)
            else:
                # Last ghost: the ply is complete, so take Pacman's MAX value next.
                effect += self.max_value(gameState.generateSuccessor(agentIndex, act), depth + 1, 0)
        # Ghosts move uniformly at random, so return the plain average.
        return effect / float(action_num)
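
# A framework-free sketch of the expectimax recursion: MAX nodes take the
# best child, chance nodes average uniformly over children, exactly as the
# ghosts are modeled above. The nested-list tree encoding is an assumption
# for illustration only.
def _toy_expectimax(node, maximizing=True):
    if not isinstance(node, list):  # leaf: return its utility
        return node
    values = [_toy_expectimax(child, not maximizing) for child in node]
    return max(values) if maximizing else sum(values) / float(len(values))

# Example: _toy_expectimax([[3, 5], [0, 10]]) == 5.0
# (max of uniform averages: (3+5)/2 = 4.0 vs (0+10)/2 = 5.0)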


def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: Scores the current state (rather than an action's successor)
    with the same features as the reflex agent: the total remaining ghost
    scared time, the reciprocal distance to the nearest food, the reciprocal
    distance to the nearest ghost (as a penalty), and the built-in game score.
    """
    "*** YOUR CODE HERE ***"
    newPos = currentGameState.getPacmanPosition()        # Pacman's position
    newFood = currentGameState.getFood()                 # remaining food as a 2D grid
    newGhostStates = currentGameState.getGhostStates()   # current ghost states
    newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]

    point = 0  # running score for this state
    # Reward scared ghosts: the longer they stay scared, the better.
    for time in newScaredTimes:
        point += time
    # Prefer states close to the nearest food (reciprocal distance).
    foodList = newFood.asList()
    foodDistance = 100
    for food in foodList:
        foodDistance = min(foodDistance, manhattanDistance(newPos, food))
    food_point = 1.0 / foodDistance
    # Penalize states close to the nearest ghost.
    ghostDistance = 100
    for ghost in newGhostStates:
        if newPos == ghost.getPosition():
            point -= 100  # sharing a square with a ghost is (usually) fatal
        ghostDistance = min(ghostDistance, manhattanDistance(newPos, ghost.getPosition()))
    ghost_point = 100 if ghostDistance == 0 else 1.0 / ghostDistance
    point += food_point - ghost_point
    point += currentGameState.getScore()
    return point
    

# Abbreviation
better = betterEvaluationFunction
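
# Example invocations, assuming the standard Berkeley Pacman project layout
# (pacman.py and autograder.py in the same directory as this file):
#
#   python pacman.py -p ReflexAgent -l testClassic
#   python pacman.py -p MinimaxAgent -l minimaxClassic -a depth=4
#   python pacman.py -p AlphaBetaAgent -a depth=3 -l smallClassic
#   python pacman.py -p ExpectimaxAgent -l minimaxClassic -a depth=3
#   python autograder.py -q q2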
