Leetcode 1515. Best Position for a Service Centre (python)
Problem

Leetcode 1515. Best Position for a Service Centre (python)

Given the 2D coordinates of all customers, find the position of a service centre that minimizes the sum of the Euclidean distances to every customer. Answers within 1e-5 of the true minimum are accepted.

Solution: gradient descent

This is the first problem I have come across that is solved with gradient descent. There is no closed-form exact solution here (the optimum is the geometric median of the points), so gradient descent is the natural idea. The hyperparameters do need some tuning: a fixed learning rate does not converge well, so learning rate decay is needed. Momentum can also be added to speed up convergence.
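For reference, the cost being minimized and its partial derivatives, which the cal_cost, cal_dx and cal_dy helpers below compute term by term, are:

f(x, y) = \sum_{i=1}^{n} \sqrt{(x - x_i)^2 + (y - y_i)^2}

\frac{\partial f}{\partial x} = \sum_{i=1}^{n} \frac{x - x_i}{\sqrt{(x - x_i)^2 + (y - y_i)^2}}, \qquad
\frac{\partial f}{\partial y} = \sum_{i=1}^{n} \frac{y - y_i}{\sqrt{(x - x_i)^2 + (y - y_i)^2}}

A term is skipped when its denominator is zero, i.e. when (x, y) lands exactly on one of the points, where f is not differentiable; this is the a/b if b else 0 guard in the code.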

with lr decay

from typing import List

class Solution:
    def getMinDistSum(self, positions: List[List[int]]) -> float:
        n = len(positions)
        if n<=1:
            return 0
        # partial derivative of the cost with respect to x
        def cal_dx(x,y):
            ans = 0
            for i in range(n):
                xi = positions[i][0]
                yi = positions[i][1]
                a = x-xi
                b = ((x-xi)**2+(y-yi)**2)**0.5
                tmp = a/b if b else 0 # skip the term when (x, y) coincides with point i
                ans += tmp
            return ans
        # partial derivative of the cost with respect to y
        def cal_dy(x,y):
            ans = 0
            for i in range(n):
                xi = positions[i][0]
                yi = positions[i][1]
                a = y-yi
                b = ((x-xi)**2+(y-yi)**2)**0.5
                tmp = a/b if b else 0 # skip the term when (x, y) coincides with point i
                ans += tmp
            return ans
        # total Euclidean distance from (x, y) to all points
        def cal_cost(x,y):
            ans = 0
            for i in range(n):
                xi = positions[i][0]
                yi = positions[i][1]
                ans += ((x-xi)**2+(y-yi)**2)**0.5
            return ans
        
        # initialize (x, y) at the centroid of the points
        x = sum(p[0] for p in positions) / len(positions)
        y = sum(p[1] for p in positions) / len(positions)
        prev_cost = cal_cost(x,y)
        reduced_cost = float('inf')
        
        # perform gradient descent with lr decay
        lr = 10
        decay = 1 - (10 ** -3) # shrink the learning rate slightly every step
        while abs(reduced_cost)>1e-15:
            dx = cal_dx(x,y)
            dy = cal_dy(x,y)
            x = x-lr*dx
            y = y-lr*dy
            curr_cost = cal_cost(x,y)
            reduced_cost = prev_cost-curr_cost
            prev_cost = curr_cost
            lr *= decay
        return curr_cost
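
A quick sanity check on LeetCode's first example, where the optimal centre is (1, 1) and the expected answer is 4.0:

sol = Solution()
print(sol.getMinDistSum([[0,1],[1,0],[1,2],[2,1]])) # ~4.00000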

with lr decay and momentum
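
The only change is the update rule: the raw gradient is replaced by a velocity that accumulates past gradients. In the form used below, with momentum coefficient \mu = 0.9 and decaying learning rate \eta_t:

v_t = g_t + \mu \, v_{t-1}, \qquad \theta_t = \theta_{t-1} - \eta_t \, v_t

where \theta stands for each coordinate, so dx and dy in the code play the role of v_t.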

from typing import List

class Solution:
    def getMinDistSum(self, positions: List[List[int]]) -> float:
        n = len(positions)
        if n<=1:
            return 0
        # partial derivative of the cost with respect to x
        def cal_dx(x,y):
            ans = 0
            for i in range(n):
                xi = positions[i][0]
                yi = positions[i][1]
                a = x-xi
                b = ((x-xi)**2+(y-yi)**2)**0.5
                tmp = a/b if b else 0 # skip the term when (x, y) coincides with point i
                ans += tmp
            return ans
        # partial derivative of the cost with respect to y
        def cal_dy(x,y):
            ans = 0
            for i in range(n):
                xi = positions[i][0]
                yi = positions[i][1]
                a = y-yi
                b = ((x-xi)**2+(y-yi)**2)**0.5
                tmp = a/b if b else 0 # skip the term when (x, y) coincides with point i
                ans += tmp
            return ans
        # total Euclidean distance from (x, y) to all points
        def cal_cost(x,y):
            ans = 0
            for i in range(n):
                xi = positions[i][0]
                yi = positions[i][1]
                ans += ((x-xi)**2+(y-yi)**2)**0.5
            return ans
        
        # initialize (x, y) at the centroid of the points
        x = sum(p[0] for p in positions) / len(positions)
        y = sum(p[1] for p in positions) / len(positions)
        prev_cost = cal_cost(x,y)
        reduced_cost = float('inf')
        dx = 0 # momentum velocity in x
        dy = 0 # momentum velocity in y
        
        # perform gradient descent with momentum and lr decay
        lr = 1
        momentum = 0.9
        decay = 0.98
        while abs(reduced_cost)>1e-15:
            dx = cal_dx(x,y) + momentum*dx
            dy = cal_dy(x,y) + momentum*dy
            x = x-lr*dx
            y = y-lr*dy
            curr_cost = cal_cost(x,y)
            reduced_cost = prev_cost-curr_cost
            prev_cost = curr_cost
            lr *= decay
        return curr_cost
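
The same check with LeetCode's second example: for two points, any point on the segment between them is optimal, so the answer equals the distance between the two points, sqrt(8) ≈ 2.82843:

sol = Solution()
print(sol.getMinDistSum([[1,1],[3,3]])) # ~2.82843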