Condition adjustment
Condition adjustment with parameters
Indirect adjustment
Indirect adjustment with constraints
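In the usual textbook notation (corrections V, weight matrix P, cofactor matrix Q = P^{-1}), the four models handled below are:

Condition adjustment:                  A V + W = 0;   K = -(A Q A^T)^{-1} W,  V = Q A^T K
Condition adjustment with parameters:  A V + B x^ + W = 0;   N_aa = A Q A^T,  x^ = -(B^T N_aa^{-1} B)^{-1} B^T N_aa^{-1} W,  K = -N_aa^{-1}(B x^ + W),  V = Q A^T K
Indirect adjustment:                   V = B x^ - l;   x^ = (B^T P B)^{-1} B^T P l
Indirect adjustment with constraints:  V = B x^ - l  subject to  C x^ + W_x = 0  (solved with Lagrange multipliers; see the sketch after the program)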
Combined program:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np

def limitAdjustment(A, W, P=None):
    """
    Condition adjustment: A*V + W = 0
    """
    A = np.mat(A)
    W = np.mat(W)
    if P is None:
        P = np.identity(A.shape[1])
    else:
        P = np.mat(P)
    print("-P\n", P)
    Q = np.linalg.inv(P)
    print("\n-Q\n", Q)
    Naa = A*Q*np.transpose(A)
    K = -np.linalg.inv(Naa)*W
    v = Q*np.transpose(A)*K
    r = A.shape[0]  # redundancy = number of condition equations
    sigama = np.sqrt((np.transpose(v)*P*v)[0, 0]/r)
    print("K:\n{} \n -V:\n {}\n\n -sigama: {}".format(K, v, sigama))
    # Cofactor matrix of the corrections
    Qvv = Q*np.transpose(A)*np.linalg.inv(Naa)*A*Q
    # Cofactor matrix of the adjusted observations
    Qadjhh = Q - Qvv
    print("------\nQvv:\n{} \n -QL^L^:\n {}".format(Qvv, Qadjhh))
    return v
def infiniteParaLimitAdjustment(A, B, W, P=None, r=None):
    """
    Condition adjustment with parameters: A*V + B*x + W = 0
    (checked against several worked examples)
    """
    A = np.mat(A)
    B = np.mat(B)
    W = np.mat(W)
    if P is None:
        P = np.identity(A.shape[1])
    else:
        P = np.mat(P)
    Q = np.linalg.inv(P)
    Naa = A*Q*np.transpose(A)
    invNaa = np.linalg.inv(Naa)
    tranB = np.transpose(B)
    Nbb = tranB*invNaa*B
    x = -np.linalg.inv(Nbb)*tranB*invNaa*W
    K = -invNaa*(B*x+W)
    V = Q*np.transpose(A)*K
    print("- P\n{}\n -Q\n{} \n- Naa\n{} \n - invNaa \n{}\n- Nbb\n{} \n-x\n{}\n-v\n{}".format(P, Q, Naa, invNaa, Nbb, x, V))
    if r is not None:
        print("\n-sigama: ", np.sqrt((np.transpose(V)*P*V)[0, 0]/r))
    # Post-adjustment cofactor matrices
    Qxx = np.linalg.inv(Nbb)
    Qvv = Q*np.transpose(A)*(invNaa - invNaa*B*Qxx*tranB*invNaa)*A*Q
    print("\n ---- Post-adjustment cofactor matrices\n - Qx^x^=invNbb\n{}\n - Qvv:\n{}".format(Qxx, Qvv))
    return x, V
def indirectAdjustment(B, L, P=None, r=None):
    """
    Indirect (parametric) adjustment: V = B*x - L
    """
    matrixB = np.mat(B)
    matrixL = np.mat(L)
    print("- matrixL\n", matrixL)
    if P is None:
        P = np.identity(matrixB.shape[0])
    else:
        P = np.mat(P)
    print("-matrixP\n", P)
    Q = np.linalg.inv(P)
    print("-matrixQ\n", Q)
    # Normal equations: NBB * x = W
    NBB = np.transpose(matrixB)*P*matrixB
    invNBB = np.linalg.inv(NBB)
    W = np.transpose(matrixB)*P*matrixL
    print("-"*20, "\n")
    print(invNBB)
    print("-W-\n")
    print(W)
    x = invNBB*W
    V = matrixB*x - matrixL
    print("matrixX\n", x, "\nmatrixV\n", V)
    if r is not None:
        print("\n-sigama: ", np.sqrt((np.transpose(V)*P*V)[0, 0]/r))
    # Post-adjustment cofactor matrices
    Q_adjLL = matrixB*invNBB*np.transpose(matrixB)
    print("\n ---- Post-adjustment cofactor matrices\n - Qx^x^=invNbb\n{}\n - Qvv:\n{}\n -QL^L^\n{}".format(invNBB, Q - Q_adjLL, Q_adjLL))
    print()
    return x, V
def infiniteLimitIndiractment():
    # Indirect adjustment with constraints - not implemented here (see the sketch after the program)
    pass
def test_limitAdjustment():
    # One condition equation: the three observations must sum to zero (misclosure -6 mm)
    A = [1, 1, 1]
    W = [-6/1000]
    P = np.diag([3, 3, 2])
    L = [[4.557], [4.277], [-8.84]]
    v = limitAdjustment(A, W, P)
    adjL = np.mat(L) + v
    equation = adjL[0, 0] + adjL[1, 0] + adjL[2, 0]
    print("-AdjL\n{}\n - Check: closure error\n{}".format(adjL, equation))
def test_indirectAdjustment():
    L = [[0], [-23], [0], [14], [0]]
    conr = [3.5, 2.7, 4.0, 3.0, 2.5]
    P = np.diag(conr)
    B = [[1, 0, 0],
         [-1, 1, 0],
         [0, 1, 0],
         [0, 1, -1],
         [0, 0, 1]]
    indirectAdjustment(B, L, P, 2)  # redundancy r = n - t = 5 - 3 = 2
def test_infiniteParaLimitAdjustment():
    A = [[1, -1, 0, 0],
         [0, 0, 1, -1],
         [1, 0, 0, 0],
         [0, 0, 1, 0]]
    B = [[0], [0], [-1], [1]]
    W = [[-20], [20], [0], [30]]
    P = np.diag([3, 0.5, 1, 0.5])
    x, v = infiniteParaLimitAdjustment(A, B, W, P)
    L = np.mat([[1.95], [1.97], [3.08], [3.06]])
    adjL = L + v/1000  # corrections are in mm, observations in m
    print("Adjusted values - adjX\n{} \n - adjL\n{}".format(59.95 + x/1000, adjL))
    #print("\n- other\n{} - {}".format(adjL[0,0]**2+adjL[1,0]**2-adjL[2,0]**2, adjL[0,0]*adjL[1,0]-(L[0,0]*L[1,0]+x)[0,0]))
def test_infiniteLimitIndiractment():
    pass
if __name__ == "__main__":
    test_limitAdjustment()
    #test_infiniteParaLimitAdjustment()
    #test_indirectAdjustment()
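The fourth model, indirect adjustment with constraints, is only stubbed out above (infiniteLimitIndiractment). A minimal sketch of how it could be filled in is shown below; it follows the same conventions as indirectAdjustment and assumes numpy is imported as np as in the script. The name limitIndirectAdjustment, its signature (B, L, C, Wx, P, r) and the Lagrange-multiplier solution are assumptions added here, not something the original script contains.

def limitIndirectAdjustment(B, L, C, Wx, P=None, r=None):
    """
    Sketch: indirect adjustment with constraints
    V = B*x - L  subject to  C*x + Wx = 0
    """
    B = np.mat(B)
    L = np.mat(L)
    C = np.mat(C)
    Wx = np.mat(Wx)
    if P is None:
        P = np.identity(B.shape[0])
    P = np.mat(P)
    Nbb = np.transpose(B)*P*B                  # normal matrix of the unconstrained problem
    invNbb = np.linalg.inv(Nbb)
    W = np.transpose(B)*P*L
    Ncc = C*invNbb*np.transpose(C)
    Ks = np.linalg.inv(Ncc)*(C*invNbb*W + Wx)  # Lagrange multipliers of the constraints
    x = invNbb*(W - np.transpose(C)*Ks)        # constrained parameter estimates
    V = B*x - L
    # Cofactor matrix of the constrained parameters
    Qxx = invNbb - invNbb*np.transpose(C)*np.linalg.inv(Ncc)*C*invNbb
    if r is not None:
        print("-sigama: ", np.sqrt((np.transpose(V)*P*V)[0, 0]/r))
    print("-x\n{}\n-V\n{}\n-Qx^x^\n{}".format(x, V, Qxx))
    return x, V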
Verification:
Running the script under Linux:
# Condition adjustment with parameters
root@ws-tkpjiq-0:~/linuxLearn/adjustment# python3 limitAdjustment.py
- P
[[3. 0. 0. 0. ]
[0. 0.5 0. 0. ]
[0. 0. 1. 0. ]
[0. 0. 0. 0.5]]
-Q
[[0.33333333 0. 0. 0. ]
[0. 2. 0. 0. ]
[0. 0. 1. 0. ]
[0. 0. 0. 2. ]]
- Naa
[[2.33333333 0. 0.33333333 0. ]
[0. 3. 0. 1. ]
[0.33333333 0. 0.33333333 0. ]
[0. 1. 0. 1. ]]
- invNaa
[[ 0.5 0. -0.5 0. ]
[ 0. 0.5 0. -0.5]
[-0.5 0. 3.5 0. ]
[ 0. -0.5 0. 1.5]]
- Nbb
[[5.]]
-x
[[-5.]]
-v
[[ -5.]
[-25.]
[-25.]
[ -5.]]
---- Post-adjustment cofactor matrices
- Qx^x^=invNbb
[[0.2]]
- Qvv:
Adjusted values - adjX
[[59.945]]
- adjL
[[1.945]
[1.945]
[3.055]
[3.055]]
# Indirect adjustment
root@ws-tkpjiq-0:~/linuxLearn/adjustment# python3 limitAdjustment.py
- matrixL
[[ 0]
[-23]
[ 0]
[ 14]
[ 0]]
-matrixP
[[3.5 0. 0. 0. 0. ]
[0. 2.7 0. 0. 0. ]
[0. 0. 4. 0. 0. ]
[0. 0. 0. 3. 0. ]
[0. 0. 0. 0. 2.5]]
-matrixQ
[[0.28571429 0. 0. 0. 0. ]
[0. 0.37037037 0. 0. 0. ]
[0. 0. 0.25 0. 0. ]
[0. 0. 0. 0.33333333 0. ]
[0. 0. 0. 0. 0.4 ]]
--------------------
[[0.18882384 0.06322512 0.03448643]
[0.06322512 0.14518361 0.07919106]
[0.03448643 0.07919106 0.2250133 ]]
-W-
[[ 62.1]
[-20.1]
[-42. ]]
matrixX
[[ 9.00670569]
[-2.31793507]
[-8.90069186]]
matrixV
[[ 9.00670569]
[11.67535923]
[-2.31793507]
[-7.41724321]
[-8.90069186]]
-sigama: 18.58820435488333
---- Post-adjustment cofactor matrices
- Qx^x^=invNbb
[[ 6.20000000e+00 -2.70000000e+00 1.46991901e-16]
[-2.70000000e+00 9.70000000e+00 -3.00000000e+00]
[ 4.68434010e-17 -3.00000000e+00 5.50000000e+00]]
- Qvv:
[[-5.91428571e+00 8.90000000e+00 2.70000000e+00 2.70000000e+00
-1.46991901e-16]
[ 8.90000000e+00 -2.09296296e+01 -1.24000000e+01 -1.54000000e+01
3.00000000e+00]
[ 2.70000000e+00 -1.24000000e+01 -9.45000000e+00 -1.27000000e+01
3.00000000e+00]
[ 2.70000000e+00 -1.54000000e+01 -1.27000000e+01 -2.08666667e+01
8.50000000e+00]
[-4.68434010e-17 3.00000000e+00 3.00000000e+00 8.50000000e+00
-5.10000000e+00]]
-QL^L^
[[ 6.20000000e+00 -8.90000000e+00 -2.70000000e+00 -2.70000000e+00
1.46991901e-16]
[-8.90000000e+00 2.13000000e+01 1.24000000e+01 1.54000000e+01
-3.00000000e+00]
[-2.70000000e+00 1.24000000e+01 9.70000000e+00 1.27000000e+01
-3.00000000e+00]
[-2.70000000e+00 1.54000000e+01 1.27000000e+01 2.12000000e+01
-8.50000000e+00]
[ 4.68434010e-17 -3.00000000e+00 -3.00000000e+00 -8.50000000e+00
5.50000000e+00]]
# Condition adjustment
root@ws-tkpjiq-0:~/linuxLearn/adjustment# python3 limitAdjustment.py
-P
[[3 0 0]
[0 3 0]
[0 0 2]]
-Q
[[0.33333333 0. 0. ]
[0. 0.33333333 0. ]
[0. 0. 0.5 ]]
K:
[[0.00514286]]
-V:
[[0.00171429]
[0.00171429]
[0.00257143]]
-sigama: 0.005554920598635309
------
Qvv:
[[0.0952381 0.0952381 0.14285714]
[0.0952381 0.0952381 0.14285714]
[0.14285714 0.14285714 0.21428571]]
-QL^L^:
[[ 0.23809524 -0.0952381 -0.14285714]
[-0.0952381 0.23809524 -0.14285714]
[-0.14285714 -0.14285714 0.28571429]]
-AdjL
[[ 4.55871429]
[ 4.27871429]
[-8.83742857]]
- Check: closure error
0.0
root@ws-tkpjiq-0:~/linuxLearn/adjustment#
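As a quick hand check of this last run: with P = diag(3, 3, 2) and V = (0.00171429, 0.00171429, 0.00257143), V^T P V = 3*(0.00171429)^2 + 3*(0.00171429)^2 + 2*(0.00257143)^2 ≈ 3.086e-05, and with r = 1 condition equation sqrt(3.086e-05 / 1) ≈ 0.005555, matching the printed sigama; the adjusted observations also sum to zero, as the closure-error line shows.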