  ML lab 03 - Implementing cost minimization for Linear Regression in TensorFlow
    IT/Deep Learning (모두를 위한 딥러닝 / Deep Learning for Everyone) 2017. 11. 7. 18:41

    Source code 1

    import tensorflow as tf
    import matplotlib.pyplot as plt
    tf.set_random_seed(777)

    X = [1,2,3]
    Y = [1,2,3]

    W = tf.placeholder(tf.float32)

    # Hypothesis: the linear model X * W
    hypothesis = X*W

    # cost (loss) function: MSE (Mean Squared Error)
    cost = tf.reduce_mean(tf.square(hypothesis - Y))

    # Create a session
    sess = tf.Session()

    # Lists for plotting the cost function
    W_history = []
    cost_history = []

    # Sweep W from -3.0 to 4.9 and record the cost at each value
    for i in range(-30, 50):
        curr_W = i * 0.1
        curr_cost = sess.run(cost, feed_dict={W: curr_W})
        W_history.append(curr_W)
        cost_history.append(curr_cost)

    # Plot cost as a function of W
    plt.plot(W_history, cost_history)
    plt.show()


    Execution result 1: the plot of cost_history against W_history (a convex, parabola-shaped cost curve with its minimum at W = 1)
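
    For this toy data set the plotted curve also has a simple closed form; expanding the cost definition above gives (a quick check added here, not part of the original post):

    \[
    \mathrm{cost}(W) \;=\; \frac{1}{3}\sum_{i=1}^{3}(W x_i - y_i)^2
                    \;=\; \frac{(W-1)^2 + (2W-2)^2 + (3W-3)^2}{3}
                    \;=\; \frac{14}{3}(W-1)^2,
    \]

    a parabola in W whose minimum (cost = 0) sits at W = 1, which is exactly the shape the loop plots over W in [-3.0, 4.9].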


    Source code 2

    import tensorflow as tf
    tf.set_random_seed(777)

    x_data = [1,2,3]
    y_data = [1,2,3]

    # Find W (and b) that fit y_data = W * x_data + b.
    # In this example b is simply fixed at 0.
    W = tf.Variable(tf.random_normal([1]), name='weight')

    X = tf.placeholder(tf.float32)
    Y = tf.placeholder(tf.float32)

    # Hypothesis: the linear model X * W
    hypothesis = X * W

    # cost (loss) function
    cost = tf.reduce_mean(tf.square(hypothesis - Y))

    # Gradient descent implemented by hand rather than with a TF optimizer:
    # W = W - learning_rate * derivative  (a short derivation follows this code block)
    learning_rate = 0.1
    gradient = tf.reduce_mean((W*X - Y) * X)
    descent = W - learning_rate * gradient
    update = W.assign(descent)

    # Create a session
    sess = tf.Session()
    # Initialize global variables
    sess.run(tf.global_variables_initializer())

    for step in range(21):
        sess.run(update, feed_dict={X: x_data, Y: y_data})
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
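
    The manual update coded above comes from differentiating the MSE cost with respect to W; a short derivation (added here for reference, not part of the original post):

    \[
    \frac{\partial}{\partial W}\,\frac{1}{m}\sum_{i=1}^{m}(W x_i - y_i)^2
      \;=\; \frac{2}{m}\sum_{i=1}^{m}(W x_i - y_i)\,x_i .
    \]

    The line gradient = tf.reduce_mean((W*X - Y) * X) drops the constant factor 2; that only rescales the effective learning rate, so the update W := W - 0.1 * gradient still converges to W = 1, as the output below shows.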


    Execution result 2

    0 1.93919 [ 1.64462376]

    1 0.551591 [ 1.34379935]

    2 0.156897 [ 1.18335962]

    3 0.0446285 [ 1.09779179]

    4 0.0126943 [ 1.05215561]

    5 0.00361082 [ 1.0278163]

    6 0.00102708 [ 1.01483536]

    7 0.000292144 [ 1.00791216]

    8 8.30968e-05 [ 1.00421977]

    9 2.36361e-05 [ 1.00225055]

    10 6.72385e-06 [ 1.00120032]

    11 1.91239e-06 [ 1.00064015]

    12 5.43968e-07 [ 1.00034142]

    13 1.54591e-07 [ 1.00018203]

    14 4.39416e-08 [ 1.00009704]

    15 1.24913e-08 [ 1.00005174]

    16 3.5322e-09 [ 1.00002754]

    17 9.99824e-10 [ 1.00001466]

    18 2.88878e-10 [ 1.00000787]

    19 8.02487e-11 [ 1.00000417]

    20 2.34053e-11 [ 1.00000226]


    Source code 3

    import tensorflow as tf
    tf.set_random_seed(777)

    # Input data
    X = [1, 2, 3]
    Y = [1, 2, 3]

    # Start from a deliberately wrong weight
    W = tf.Variable(5.0)

    #Linear model
    hypothesis = X*W

    # cost (loss) function
    cost = tf.reduce_mean(tf.square(hypothesis - Y))

    # Use gradient descent via tf.train.GradientDescentOptimizer
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train = optimizer.minimize(cost)

    # Create a session
    sess = tf.Session()
    # Initialize global variables
    sess.run(tf.global_variables_initializer())

    for step in range(100):
        print(step, sess.run(W))
        sess.run(train)
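
    The very first optimizer step can be checked by hand (a sanity check added here, not part of the original post). With W = 5 and x = y = [1, 2, 3],

    \[
    \nabla_W\,\mathrm{cost} \;=\; \frac{2}{3}\sum_{i=1}^{3}(5x_i - x_i)\,x_i \;=\; \frac{2}{3}\cdot 56 \;=\; \frac{112}{3} \approx 37.33,
    \]

    so the next weight is 5 - 0.1 * 37.33 ≈ 1.2667, matching the "1 1.26667" line below (and the gradient of 37.333332 printed at step 0 by Source code 4).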


    Execution result 3

    0 5.0

    1 1.26667

    2 1.01778

    3 1.00119

    4 1.00008

    5 1.00001

    6 1.0

    (steps 7 through 99 all print 1.0; W has converged)


    Source code 4

    import tensorflow as tf
    tf.set_random_seed(777)

    # Input data
    X = [1, 2, 3]
    Y = [1, 2, 3]

    # Set the initial (wrong) value of W
    W = tf.Variable(5.)

    # Linear model
    hypothesis = X*W

    # Manually computed gradient (printed for comparison with the optimizer's gradient)
    gradient = tf.reduce_mean((W*X - Y)*X)*2

    # cost (loss) function
    cost = tf.reduce_mean(tf.square(hypothesis - Y))

    # Gradient descent optimizer
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train = optimizer.minimize(cost)  # defined but not run below; gradients are applied explicitly instead

    # Compute the (gradient, variable) pairs explicitly
    gvs = optimizer.compute_gradients(cost, [W])
    # Apply the gradients
    apply_gradients = optimizer.apply_gradients(gvs)

    # Create a session
    sess = tf.Session()
    # Initialize global variables
    sess.run(tf.global_variables_initializer())

    for step in range(100):
        print(step, sess.run([gradient, W, gvs]))
        sess.run(apply_gradients)
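
    The reason to split compute_gradients and apply_gradients is that the gradient/variable pairs can be inspected or modified in between. Below is a minimal sketch of one common use, clipping the gradients before applying them; the clipping range [-1, 1] and the variable names are illustrative assumptions, not part of the original post:

    # Hypothetical variation: clip each gradient into [-1, 1] before it is applied
    gvs = optimizer.compute_gradients(cost, [W])
    clipped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gvs]
    apply_clipped = optimizer.apply_gradients(clipped_gvs)

    # sess.run(apply_clipped) then takes one descent step with the clipped gradient
    # instead of the raw one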


    Execution result 4

    0 [37.333332, 5.0, [(37.333336, 5.0)]]

    1 [33.848888, 4.6266665, [(33.848888, 4.6266665)]]

    2 [30.689657, 4.2881775, [(30.689657, 4.2881775)]]

    3 [27.825287, 3.9812808, [(27.825287, 3.9812808)]]

    4 [25.228262, 3.703028, [(25.228264, 3.703028)]]

    5 [22.873621, 3.4507453, [(22.873623, 3.4507453)]]

    6 [20.738752, 3.2220092, [(20.738752, 3.2220092)]]

    7 [18.803137, 3.0146217, [(18.803137, 3.0146217)]]

    8 [17.048176, 2.8265903, [(17.048176, 2.8265903)]]

    9 [15.457013, 2.6561086, [(15.457014, 2.6561086)]]

    10 [14.014359, 2.5015385, [(14.01436, 2.5015385)]]

    11 [12.706352, 2.3613949, [(12.706352, 2.3613949)]]

    12 [11.520427, 2.2343314, [(11.520427, 2.2343314)]]

    13 [10.445186, 2.119127, [(10.445186, 2.119127)]]

    14 [9.4703016, 2.0146751, [(9.4703016, 2.0146751)]]

    15 [8.5864067, 1.9199722, [(8.5864067, 1.9199722)]]

    16 [7.7850089, 1.8341081, [(7.7850089, 1.8341081)]]

    17 [7.0584083, 1.756258, [(7.0584083, 1.756258)]]

    18 [6.3996239, 1.685674, [(6.3996239, 1.685674)]]

    19 [5.8023257, 1.6216778, [(5.8023257, 1.6216778)]]

    20 [5.260776, 1.5636545, [(5.260776, 1.5636545)]]

    21 [4.7697697, 1.5110468, [(4.7697697, 1.5110468)]]

    22 [4.3245912, 1.4633491, [(4.3245912, 1.4633491)]]

    23 [3.9209633, 1.4201032, [(3.9209635, 1.4201032)]]

    24 [3.5550067, 1.3808936, [(3.5550067, 1.3808936)]]

    25 [3.2232056, 1.3453435, [(3.2232056, 1.3453435)]]

    26 [2.9223735, 1.3131114, [(2.9223738, 1.3131114)]]

    27 [2.6496189, 1.2838877, [(2.6496186, 1.2838877)]]

    28 [2.4023216, 1.2573916, [(2.4023218, 1.2573916)]]

    29 [2.1781051, 1.2333684, [(2.1781051, 1.2333684)]]

    30 [1.9748148, 1.2115873, [(1.9748147, 1.2115873)]]

    31 [1.7904993, 1.1918392, [(1.7904994, 1.1918392)]]

    32 [1.623386, 1.1739342, [(1.6233861, 1.1739342)]]

    33 [1.4718695, 1.1577003, [(1.4718695, 1.1577003)]]

    34 [1.3344955, 1.1429816, [(1.3344957, 1.1429816)]]

    35 [1.2099417, 1.1296366, [(1.2099419, 1.1296366)]]

    36 [1.0970144, 1.1175373, [(1.0970144, 1.1175373)]]

    37 [0.9946267, 1.1065671, [(0.9946267, 1.1065671)]]

    38 [0.90179497, 1.0966209, [(0.90179503, 1.0966209)]]

    39 [0.81762749, 1.087603, [(0.81762755, 1.087603)]]

    40 [0.74131513, 1.0794266, [(0.74131513, 1.0794266)]]

    41 [0.67212623, 1.0720135, [(0.67212629, 1.0720135)]]

    42 [0.60939401, 1.0652922, [(0.60939401, 1.0652922)]]

    43 [0.55251688, 1.0591983, [(0.55251688, 1.0591983)]]

    44 [0.50094914, 1.0536731, [(0.50094914, 1.0536731)]]

    45 [0.45419374, 1.0486636, [(0.45419377, 1.0486636)]]

    46 [0.41180158, 1.0441216, [(0.41180158, 1.0441216)]]

    47 [0.37336722, 1.0400037, [(0.37336725, 1.0400037)]]

    48 [0.33851996, 1.03627, [(0.33851999, 1.03627)]]

    49 [0.30692515, 1.0328848, [(0.30692515, 1.0328848)]]

    50 [0.27827826, 1.0298156, [(0.27827829, 1.0298156)]]

    51 [0.25230527, 1.0270327, [(0.25230527, 1.0270327)]]

    52 [0.2287569, 1.0245097, [(0.2287569, 1.0245097)]]

    53 [0.20740573, 1.022222, [(0.20740573, 1.022222)]]

    54 [0.18804836, 1.020148, [(0.18804836, 1.020148)]]

    55 [0.17049654, 1.0182675, [(0.17049655, 1.0182675)]]

    56 [0.15458433, 1.0165626, [(0.15458435, 1.0165626)]]

    57 [0.14015675, 1.0150168, [(0.14015675, 1.0150168)]]

    58 [0.12707591, 1.0136153, [(0.12707591, 1.0136153)]]

    59 [0.11521538, 1.0123445, [(0.11521538, 1.0123445)]]

    60 [0.10446167, 1.0111923, [(0.10446167, 1.0111923)]]

    61 [0.094712019, 1.0101477, [(0.094712019, 1.0101477)]]

    62 [0.085872017, 1.0092006, [(0.085872017, 1.0092006)]]

    63 [0.077858053, 1.0083419, [(0.077858053, 1.0083419)]]

    64 [0.070591293, 1.0075634, [(0.070591293, 1.0075634)]]

    65 [0.064002357, 1.0068574, [(0.064002357, 1.0068574)]]

    66 [0.05802846, 1.0062174, [(0.05802846, 1.0062174)]]

    67 [0.052612226, 1.005637, [(0.052612226, 1.005637)]]

    68 [0.047702473, 1.005111, [(0.047702473, 1.005111)]]

    69 [0.043249767, 1.0046339, [(0.043249767, 1.0046339)]]

    70 [0.039213181, 1.0042014, [(0.039213181, 1.0042014)]]

    71 [0.035553534, 1.0038093, [(0.035553537, 1.0038093)]]

    72 [0.032236177, 1.0034539, [(0.032236181, 1.0034539)]]

    73 [0.029227654, 1.0031315, [(0.029227655, 1.0031315)]]

    74 [0.02649951, 1.0028392, [(0.02649951, 1.0028392)]]

    75 [0.024025917, 1.0025742, [(0.024025917, 1.0025742)]]

    76 [0.021783749, 1.002334, [(0.021783751, 1.002334)]]

    77 [0.01975123, 1.0021162, [(0.019751232, 1.0021162)]]

    78 [0.017907381, 1.0019187, [(0.017907381, 1.0019187)]]

    79 [0.016236702, 1.0017396, [(0.016236704, 1.0017396)]]

    80 [0.014720838, 1.0015773, [(0.014720838, 1.0015773)]]

    81 [0.01334699, 1.00143, [(0.013346991, 1.00143)]]

    82 [0.012100856, 1.0012965, [(0.012100856, 1.0012965)]]

    83 [0.010971785, 1.0011755, [(0.010971785, 1.0011755)]]

    84 [0.0099481745, 1.0010659, [(0.0099481754, 1.0010659)]]

    85 [0.009018898, 1.0009663, [(0.009018898, 1.0009663)]]

    86 [0.0081768828, 1.0008761, [(0.0081768837, 1.0008761)]]

    87 [0.0074131489, 1.0007943, [(0.0074131489, 1.0007943)]]

    88 [0.0067215762, 1.0007201, [(0.0067215762, 1.0007201)]]

    89 [0.0060940585, 1.0006529, [(0.0060940585, 1.0006529)]]

    90 [0.0055252709, 1.000592, [(0.0055252714, 1.000592)]]

    91 [0.0050098896, 1.0005368, [(0.0050098896, 1.0005368)]]

    92 [0.0045425892, 1.0004867, [(0.0045425892, 1.0004867)]]

    93 [0.0041189194, 1.0004413, [(0.0041189194, 1.0004413)]]

    94 [0.0037339528, 1.0004001, [(0.003733953, 1.0004001)]]

    95 [0.0033854644, 1.0003628, [(0.0033854644, 1.0003628)]]

    96 [0.0030694802, 1.0003289, [(0.0030694804, 1.0003289)]]

    97 [0.0027837753, 1.0002983, [(0.0027837753, 1.0002983)]]

    98 [0.0025234222, 1.0002704, [(0.0025234222, 1.0002704)]]

    99 [0.0022875469, 1.0002451, [(0.0022875469, 1.0002451)]]


    Source: Deep Learning for Everyone (모두를 위한 딥러닝)

