diff --git a/pyrca/__init__.py b/pyrca/__init__.py
index f97c759..eed9d11 100644
--- a/pyrca/__init__.py
+++ b/pyrca/__init__.py
@@ -3,11 +3,10 @@
 # All rights reserved.
 # SPDX-License-Identifier: BSD-3-Clause
 # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause#
-from pkg_resources import get_distribution, DistributionNotFound
+from importlib.metadata import version, PackageNotFoundError

 try:
-    dist = get_distribution("sfr-pyrca")
-except DistributionNotFound:
+    __version__ = version("sfr-pyrca")
+except PackageNotFoundError:
     __version__ = "Please install PyRCA with setup.py"
-else:
-    __version__ = dist.version
+
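Note on the hunk above: `pkg_resources` is deprecated in setuptools, while `importlib.metadata` has shipped in the standard library since Python 3.8 and reads the same installed-distribution metadata. A minimal sketch of the new lookup pattern:

```python
# importlib.metadata resolves the version of an *installed* distribution;
# the argument is the distribution name ("sfr-pyrca"), not the import name.
from importlib.metadata import version, PackageNotFoundError

try:
    __version__ = version("sfr-pyrca")
except PackageNotFoundError:
    # e.g. running from a source checkout that was never installed
    __version__ = "Please install PyRCA with setup.py"
```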
diff --git a/pyrca/thirdparty/causallearn/score/LocalScoreFunction.py b/pyrca/thirdparty/causallearn/score/LocalScoreFunction.py
index cdc19bc..22d7247 100644
--- a/pyrca/thirdparty/causallearn/score/LocalScoreFunction.py
+++ b/pyrca/thirdparty/causallearn/score/LocalScoreFunction.py
@@ -34,8 +34,8 @@ def local_score_BIC(Data: ndarray, i: int, PAi: List[int], parameters=None) -> f
     if len(PAi) == 0:
         return n * np.log(cov[i, i])

-    yX = np.mat(cov[np.ix_([i], PAi)])
-    XX = np.mat(cov[np.ix_(PAi, PAi)])
+    yX = np.asmatrix(cov[np.ix_([i], PAi)])
+    XX = np.asmatrix(cov[np.ix_(PAi, PAi)])
     H = np.log(cov[i, i] - yX * np.linalg.inv(XX) * yX.T)

     return n * H + np.log(n) * len(PAi) * lambda_value
@@ -68,8 +68,8 @@ def local_score_BIC_from_cov(
     if len(PAi) == 0:
         return n * np.log(cov[i, i])

-    yX = np.mat(cov[np.ix_([i], PAi)])
-    XX = np.mat(cov[np.ix_(PAi, PAi)])
+    yX = np.asmatrix(cov[np.ix_([i], PAi)])
+    XX = np.asmatrix(cov[np.ix_(PAi, PAi)])
     H = np.log(cov[i, i] - yX * np.linalg.inv(XX) * yX.T)

     return n * H + np.log(n) * len(PAi) * lambda_value
@@ -194,7 +194,7 @@ def local_score_cv_general(
         score: local score
     """

-    Data = np.mat(Data)
+    Data = np.asmatrix(Data)
     PAi = list(PAi)

     T = Data.shape[0]
@@ -223,7 +223,7 @@ def local_score_cv_general(

     Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
     H0 = (
-        np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
+        np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / T
     )  # for centering of the data in feature space
     Kx = H0 * Kx * H0  # kernel matrix for X

@@ -234,7 +234,7 @@ def local_score_cv_general(
     # mx = len(IIx)

     # set the kernel for PA
-    Kpa = np.mat(np.ones((T, T)))
+    Kpa = np.asmatrix(np.ones((T, T)))

     for m in range(PA.shape[1]):
         G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
@@ -251,7 +251,7 @@ def local_score_cv_general(
         Kpa = np.multiply(Kpa, kernel(PA[:, m], PA[:, m], (theta, 1))[0])

     H0 = (
-        np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
+        np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / T
     )  # for centering of the data in feature space
     Kpa = H0 * Kpa * H0  # kernel matrix for PA

@@ -317,11 +317,11 @@ def local_score_cv_general(
                 raise ValueError("Not cover all logic path")

             n1 = T - nv
-            tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
+            tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.asmatrix(np.eye(n1)))
             tmp2 = tmp1 * Kx_tr * tmp1
             tmp3 = (
                 tmp1
-                * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda**2 / gamma * tmp2)
+                * pdinv(np.asmatrix(np.eye(n1)) + n1 * var_lambda**2 / gamma * tmp2)
                 * tmp1
             )
             A = (
@@ -350,7 +350,7 @@ def local_score_cv_general(
                 * Kpa_tr_te
             ) / gamma

-            B = n1 * var_lambda**2 / gamma * tmp2 + np.mat(np.eye(n1))
+            B = n1 * var_lambda**2 / gamma * tmp2 + np.asmatrix(np.eye(n1))
             L = np.linalg.cholesky(B)
             C = np.sum(np.log(np.diag(L)))
             # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
@@ -372,7 +372,7 @@ def local_score_cv_general(

         theta = 1 / (width**2)
         Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
-        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (
+        H0 = np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / (
             T
         )  # for centering of the data in feature space
         Kx = H0 * Kx * H0  # kernel matrix for X
@@ -423,10 +423,10 @@ def local_score_cv_general(
                 - 1
                 / (gamma * n1)
                 * Kx_tr_te.T
-                * pdinv(np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr)
+                * pdinv(np.asmatrix(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr)
                 * Kx_tr_te
             ) / gamma

-            B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
+            B = 1 / (gamma * n1) * Kx_tr + np.asmatrix(np.eye(n1))
             L = np.linalg.cholesky(B)
             C = np.sum(np.log(np.diag(L)))

@@ -487,13 +487,13 @@ def local_score_cv_multi(

         theta = 1 / (width**2 * X.shape[1])  #
         Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
-        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (
+        H0 = np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / (
             T
         )  # for centering of the data in feature space
         Kx = H0 * Kx * H0  # kernel matrix for X

     # set the kernel for PA
-    Kpa = np.mat(np.ones((T, T)))
+    Kpa = np.asmatrix(np.ones((T, T)))

     for m in range(len(PAi)):
         PA = Data[:, parameters["dlabel"][PAi[m]]]
@@ -510,7 +510,7 @@ def local_score_cv_multi(
             theta = 1 / (width**2 * PA.shape[1])
             Kpa = np.multiply(Kpa, kernel(PA, PA, (theta, 1))[0])

-    H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (
+    H0 = np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / (
         T
     )  # for centering of the data in feature space
     Kpa = H0 * Kpa * H0  # kernel matrix for PA
@@ -577,11 +577,11 @@ def local_score_cv_multi(
                 raise ValueError("Not cover all logic path")

             n1 = T - nv
-            tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
+            tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.asmatrix(np.eye(n1)))
             tmp2 = tmp1 * Kx_tr * tmp1
             tmp3 = (
                 tmp1
-                * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda**2 / gamma * tmp2)
+                * pdinv(np.asmatrix(np.eye(n1)) + n1 * var_lambda**2 / gamma * tmp2)
                 * tmp1
             )
             A = (
@@ -610,7 +610,7 @@ def local_score_cv_multi(
                 * Kpa_tr_te
             ) / gamma

-            B = n1 * var_lambda**2 / gamma * tmp2 + np.mat(np.eye(n1))
+            B = n1 * var_lambda**2 / gamma * tmp2 + np.asmatrix(np.eye(n1))
             L = np.linalg.cholesky(B)
             C = np.sum(np.log(np.diag(L)))
             # CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
@@ -632,7 +632,7 @@ def local_score_cv_multi(

         theta = 1 / (width**2 * X.shape[1])  #
         Kx, _ = kernel(X, X, (theta, 1))  # Gaussian kernel
-        H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (
+        H0 = np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / (
             T
         )  # for centering of the data in feature space
         Kx = H0 * Kx * H0  # kernel matrix for X
@@ -679,10 +679,10 @@ def local_score_cv_multi(
                 - 1
                 / (gamma * n1)
                 * Kx_tr_te.T
-                * pdinv(np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr)
+                * pdinv(np.asmatrix(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr)
                 * Kx_tr_te
             ) / gamma

-            B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
+            B = 1 / (gamma * n1) * Kx_tr + np.asmatrix(np.eye(n1))
             L = np.linalg.cholesky(B)
             C = np.sum(np.log(np.diag(L)))

@@ -728,7 +728,7 @@ def local_score_marginal_general(
     width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
     width = width * 2.5  # kernel width
     theta = 1 / (width**2)
-    H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
+    H = np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / T
     Kx, _ = kernel(X, X, (theta, 1))
     Kx = H * Kx * H

@@ -743,7 +743,7 @@ def local_score_marginal_general(
     if len(PAi):
         PA = Data[:, PAi]

-        widthPA = np.mat(np.empty((PA.shape[1], 1)))
+        widthPA = np.asmatrix(np.empty((PA.shape[1], 1)))
         # set the kernel for PA
         for m in range(PA.shape[1]):
             G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
@@ -777,8 +777,8 @@ def local_score_marginal_general(
         )
     else:
         covfunc = np.asarray(["covSum", ["covSEard", "covNoise"]])
-        PA = np.mat(np.zeros((T, 1)))
-        logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T
+        PA = np.asmatrix(np.zeros((T, 1)))
+        logtheta0 = np.asmatrix([100, 0, np.log(np.sqrt(0.1))]).T
     logtheta, fvals, iter = minimize(
         logtheta0,
         "gpr_multi_new",
@@ -834,7 +834,7 @@ def local_score_marginal_multi(
     widthX = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
     widthX = widthX * 2.5  # kernel width
     theta = 1 / (widthX**2)
-    H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
+    H = np.asmatrix(np.eye(T)) - np.asmatrix(np.ones((T, T))) / T
     Kx, _ = kernel(X, X, (theta, 1))
     Kx = H * Kx * H

@@ -847,9 +847,9 @@ def local_score_marginal_multi(
     eix = eix[:, IIx]

     if len(PAi):
-        widthPA_all = np.mat(np.empty((1, 0)))
+        widthPA_all = np.asmatrix(np.empty((1, 0)))
         # set the kernel for PA
-        PA_all = np.mat(np.empty((Data.shape[0], 0)))
+        PA_all = np.asmatrix(np.empty((Data.shape[0], 0)))
         for m in range(len(PAi)):
             PA = Data[:, parameters["dlabel"][PAi[m]]]
             PA_all = np.hstack([PA_all, PA])
@@ -864,7 +864,7 @@ def local_score_marginal_multi(
                 [
                     widthPA_all,
                     widthPA
-                    * np.mat(np.ones((1, np.size(parameters["dlabel"][PAi[m]])))),
+                    * np.asmatrix(np.ones((1, np.size(parameters["dlabel"][PAi[m]])))),
                 ]
             )
         widthPA_all = widthPA_all * 2.5  # kernel width
@@ -888,8 +888,8 @@ def local_score_marginal_multi(
         )
     else:
         covfunc = np.asarray(["covSum", ["covSEard", "covNoise"]])
-        PA = np.mat(np.zeros((T, 1)))
-        logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T
+        PA = np.asmatrix(np.zeros((T, 1)))
+        logtheta0 = np.asmatrix([100, 0, np.log(np.sqrt(0.1))]).T
     logtheta, fvals, iter = minimize(
         logtheta0,
         "gpr_multi_new",
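The bulk of this file's changes (and most hunks below) are a mechanical `np.mat` → `np.asmatrix` rename: `np.mat` was an alias of `np.asmatrix`, and NumPy 2.0 drops the alias while keeping `np.asmatrix`, so semantics are unchanged. A quick sanity sketch:

```python
import numpy as np

a = np.eye(3)
m = np.asmatrix(a)               # matrix view over the same buffer, no copy
assert np.shares_memory(m, a)
assert (m * m == a @ a).all()    # `*` on np.matrix is still matrix product
```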
diff --git a/pyrca/thirdparty/causallearn/search/ScoreBased/GES.py b/pyrca/thirdparty/causallearn/search/ScoreBased/GES.py
index 7109099..c4c4aea 100644
--- a/pyrca/thirdparty/causallearn/search/ScoreBased/GES.py
+++ b/pyrca/thirdparty/causallearn/search/ScoreBased/GES.py
@@ -44,7 +44,7 @@ def ges(X: ndarray, score_func: str = 'local_score_BIC', maxP: Optional[float] =
     if X.shape[0] < X.shape[1]:
         warnings.warn("The number of features is much larger than the sample size!")

-    X = np.mat(X)
+    X = np.asmatrix(X)
     if score_func == 'local_score_CV_general':  # % k-fold negative cross validated likelihood based on regression in RKHS
         if parameters is None:
             parameters = {'kfold': 10,  # 10 fold cross validation
@@ -150,7 +150,7 @@ def ges(X: ndarray, score_func: str = 'local_score_BIC', maxP: Optional[float] =
                                 np.where(G.graph[j, :] == Endpoint.TAIL.value)[0])  # neighbors of Xj

                 Ti = np.union1d(np.where(G.graph[:, i] != Endpoint.NULL.value)[0],
-                                np.where(G.graph[i, 0] != Endpoint.NULL.value)[0])  # adjacent to Xi
+                                np.where(G.graph[i, :] != Endpoint.NULL.value)[0])  # adjacent to Xi

                 NTi = np.setdiff1d(np.arange(N), Ti)
                 T0 = np.intersect1d(Tj, NTi)  # find the neighbours of Xj that are not adjacent to Xi
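Besides the rename, this file carries the one behavioral fix in the patch: `G.graph[i, 0]` tested a single scalar entry, so the adjacency set `Ti` could silently miss nodes adjacent to Xi, while `G.graph[i, :]` tests the whole row as the comment intends. A toy illustration (hypothetical 3-node matrix; `0` stands in for `Endpoint.NULL.value`):

```python
import numpy as np

graph = np.array([[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]])
i = 0
buggy = np.where(graph[i, 0] != 0)[0]  # scalar test: always empty here
fixed = np.where(graph[i, :] != 0)[0]  # row test: [1], the true neighbour
print(buggy, fixed)
```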
diff --git a/pyrca/thirdparty/causallearn/utils/DAG2CPDAG.py b/pyrca/thirdparty/causallearn/utils/DAG2CPDAG.py
index 6723fa7..d056500 100644
--- a/pyrca/thirdparty/causallearn/utils/DAG2CPDAG.py
+++ b/pyrca/thirdparty/causallearn/utils/DAG2CPDAG.py
@@ -28,7 +28,7 @@ def dag2cpdag(G: Dag) -> GeneralGraph:
         map(lambda x: G.node_map[x], G.get_causal_ordering()))  # Perform a topological sort on the nodes of G
     # nodes_order(1) is the node which has the highest order
     # nodes_order(N) is the node which has the lowest order
-    edges_order = np.mat([[], []], dtype=np.int64).T
+    edges_order = np.asmatrix([[], []], dtype=np.int64).T
     # edges_order(1,:) is the edge which has the highest order
     # edges_order(M,:) is the edge which has the lowest order
     M = G.get_num_edges()  # the number of edges in this DAG
@@ -54,7 +54,7 @@ def dag2cpdag(G: Dag) -> GeneralGraph:
             else:
                 if G.graph[j, i] == 1:
                     break
-        edges_order = np.r_[edges_order, np.mat([i, j])]
+        edges_order = np.r_[edges_order, np.asmatrix([i, j])]

     ## ----------------------------------------------------------------
     sign_edges = np.zeros(M)  # 0 means unknown, 1 means compelled, -1 means reversible
diff --git a/pyrca/thirdparty/causallearn/utils/GESUtils.py b/pyrca/thirdparty/causallearn/utils/GESUtils.py
index da568fe..141330c 100644
--- a/pyrca/thirdparty/causallearn/utils/GESUtils.py
+++ b/pyrca/thirdparty/causallearn/utils/GESUtils.py
@@ -371,8 +371,8 @@ def dist2(x, c):
     if dimx != dimc:
         raise Exception('Data dimension does not match dimension of centres')

-    n2 = (np.mat(np.ones((ncentres, 1))) * np.sum(np.multiply(x, x).T, axis=0)).T + \
-         np.mat(np.ones((ndata, 1))) * np.sum(np.multiply(c, c).T, axis=0) - \
+    n2 = (np.asmatrix(np.ones((ncentres, 1))) * np.sum(np.multiply(x, x).T, axis=0)).T + \
+         np.asmatrix(np.ones((ndata, 1))) * np.sum(np.multiply(c, c).T, axis=0) - \
         2 * (x * c.T)

     # Rounding errors occasionally cause negative entries in n2
@@ -393,7 +393,7 @@ def pdinv(A):
             Ainv = vh.T.dot(np.diag(1 / s)).dot(u.T)
         except Exception as e:
             raise e
-    return np.mat(Ainv)
+    return np.asmatrix(Ainv)

 def add_required_edges(G, background_knowledge, verbose=False):
     if background_knowledge is None:
diff --git a/pyrca/thirdparty/causallearn/utils/ScoreUtils.py b/pyrca/thirdparty/causallearn/utils/ScoreUtils.py
index e04e0c5..b5fe5c6 100644
--- a/pyrca/thirdparty/causallearn/utils/ScoreUtils.py
+++ b/pyrca/thirdparty/causallearn/utils/ScoreUtils.py
@@ -39,8 +39,8 @@ def dist2(x, c):
     if (dimx != dimc):
         raise Exception('Data dimension does not match dimension of centres')

-    n2 = (np.mat(np.ones((ncentres, 1))) * np.sum(np.multiply(x, x).T, axis=0)).T + \
-         np.mat(np.ones((ndata, 1))) * np.sum(np.multiply(c, c).T, axis=0) - \
+    n2 = (np.asmatrix(np.ones((ncentres, 1))) * np.sum(np.multiply(x, x).T, axis=0)).T + \
+         np.asmatrix(np.ones((ndata, 1))) * np.sum(np.multiply(c, c).T, axis=0) - \
         2 * (x * c.T)

     # Rounding errors occasionally cause negative entries in n2
@@ -61,7 +61,7 @@ def pdinv(A):
             Ainv = vh.T.dot(np.diag(1 / s)).dot(u.T)
         except Exception as e:
             raise e
-    return np.mat(Ainv)
+    return np.asmatrix(Ainv)

 def eigdec(x, N, evals_only=False):
@@ -433,9 +433,9 @@ def gpr_multi_new(logtheta=None, covfunc=None, x=None, y=None, xstar=None, nargo
         out1 = 0.5 * np.trace(y.T * alpha) + m * np.sum(np.log(np.diag(L)), axis=0) + 0.5 * m * n * np.log(
             2 * np.pi)
         if nargout == 2:  # ... and if requested, its partial derivatives
-            out2 = np.mat(np.zeros((logtheta.shape[0], 1)))  # set the size of the derivative vector
+            out2 = np.asmatrix(np.zeros((logtheta.shape[0], 1)))  # set the size of the derivative vector
             W = m * (np.linalg.inv(L.T) * (
-                    np.linalg.inv(L) * np.mat(np.eye(n)))) - alpha * alpha.T  # precompute for convenience
+                    np.linalg.inv(L) * np.asmatrix(np.eye(n)))) - alpha * alpha.T  # precompute for convenience
             for i in range(len(out2) - 1, len(out2)):
                 temp = list(covfunc.copy())
                 temp.append(logtheta)
@@ -453,7 +453,7 @@ def gpr_multi_new(logtheta=None, covfunc=None, x=None, y=None, xstar=None, nargo

         if nargout == 2:
             v = np.linalg.inv(L) * Kstar
-            v = np.mat(v)
+            v = np.asmatrix(v)
             out2 = Kss - np.sum(np.multiply(v, v), axis=0).T

     if nargout == 1:
@@ -485,7 +485,7 @@ def solve_chol(A, B):
     return res

-K = np.mat(np.empty((0, 0)))
+K = np.asmatrix(np.empty((0, 0)))

 def cov_noise(logtheta=None, x=None, z=None, nargout=1):
@@ -509,12 +509,12 @@ def cov_noise(logtheta=None, x=None, z=None, nargout=1):
     s2 = np.exp(2 * logtheta)[0, 0]  # noise variance

     if (logtheta is not None and x is not None and z is None):  # compute covariance matrix
-        A = s2 * np.mat(np.eye(x.shape[0]))
+        A = s2 * np.asmatrix(np.eye(x.shape[0]))
     elif (nargout == 2):  # compute test set covariances
         A = s2
         B = 0  # zeros cross covariance by independence
     else:  # compute derivative matrix
-        A = 2 * s2 * np.mat(np.eye(x.shape[0]))
+        A = 2 * s2 * np.asmatrix(np.eye(x.shape[0]))

     if (nargout == 2):
         return A, B
@@ -552,21 +552,21 @@ def cov_seard(loghyper=None, x=None, z=None, nargout=1):
     sf2 = np.exp(2 * loghyper[D])  # signal variance

     if (loghyper is not None and x is not None):
-        K = sf2 * np.exp(-sq_dist(np.mat(np.diag(1 / ell) * x.T)) / 2)
+        K = sf2 * np.exp(-sq_dist(np.asmatrix(np.diag(1 / ell) * x.T)) / 2)
         A = K
     elif nargout == 2:  # compute test set covariances
-        A = sf2 * np.mat(np.ones((z, 1)))
-        B = sf2 * np.exp(-sq_dist(np.mat(np.diag(1 / ell)) * x.T, np.mat(np.diag(1 / ell)) * z) / 2)
+        A = sf2 * np.asmatrix(np.ones((z, 1)))
+        B = sf2 * np.exp(-sq_dist(np.asmatrix(np.diag(1 / ell)) * x.T, np.asmatrix(np.diag(1 / ell)) * z) / 2)
     else:
         # check for correct dimension of the previously calculated kernel matrix
         if (K.shape[0] != n or K.shape[1] != n):
-            K = sf2 * np.exp(-sq_dist(np.mat(np.diag(1 / ell) * x.T)) / 2)
+            K = sf2 * np.exp(-sq_dist(np.asmatrix(np.diag(1 / ell) * x.T)) / 2)
         if z <= D:  # length scale parameters
             A = np.multiply(K, sq_dist(x[:, z].T / ell[z]))
         else:  # magnitude parameter
             A = 2 * K
-            K = np.mat(np.empty((0, 0)))
+            K = np.asmatrix(np.empty((0, 0)))

     if (nargout == 2):
         return A, B

@@ -610,13 +610,13 @@ def sq_dist(a, b=None, Q=None):
         raise Exception('Error: column lengths must agree.')

     if Q is None:
-        C = np.mat(np.zeros((n, m)))
+        C = np.asmatrix(np.zeros((n, m)))
         for d in range(D):
             temp = np.tile(b[d, :], (n, 1)) - np.tile(a[d, :].T, (1, m))
             C = C + np.multiply(temp, temp)
     else:
         if (n, m) == Q.shape:
-            C = np.mat(np.zeros((D, 1)))
+            C = np.asmatrix(np.zeros((D, 1)))
             for d in range(D):
                 temp = np.tile(b[d, :], (n, 1)) - np.tile(a[d, :].T, (1, m))
                 temp = np.multiply(temp, temp)
@@ -656,7 +656,7 @@ def cov_sum(covfunc, logtheta=None, x=None, z=None, nargout=1):
     v = np.asarray(v)

     if (logtheta is not None and x is not None and z is None):  # compute covariance matrix
-        A = np.mat(np.zeros((n, n)))  # allocate space for covariance matrix
+        A = np.asmatrix(np.zeros((n, n)))  # allocate space for covariance matrix
         for i in range(len(covfunc)):  # iteration over summand functions
             f = covfunc[i]
             temp = [f]
@@ -668,8 +668,8 @@ def cov_sum(covfunc, logtheta=None, x=None, z=None, nargout=1):
     if (
             logtheta is not None and x is not None and z is not None):  # compute derivative matrix or test set covariances
         if nargout == 2:  # compute test set cavariances
-            A = np.mat(np.zeros((z, 1)))
-            B = np.mat(np.zeros((x.shape[0], z)))  # allocate space
+            A = np.asmatrix(np.zeros((z, 1)))
+            B = np.asmatrix(np.zeros((x.shape[0], z)))  # allocate space
             for i in range(len(covfunc)):
                 f = covfunc[i]
                 temp = [f]
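One reading note on the code these hunks touch: the recurring `H0 = np.eye(T) - np.ones((T, T)) / T` is the centering matrix, and `H0 * K * H0` centers the kernel matrix `K` in feature space. A small ndarray-based check of that identity for a linear kernel (illustration only; the patched code itself stays on `np.matrix` semantics):

```python
import numpy as np

T = 5
rng = np.random.default_rng(0)
X = rng.normal(size=(T, 2))
K = X @ X.T                           # linear kernel for simplicity
H0 = np.eye(T) - np.ones((T, T)) / T  # centering matrix
Xc = X - X.mean(axis=0)               # centering in input space
assert np.allclose(H0 @ K @ H0, Xc @ Xc.T)
```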
diff --git a/pyrca/thirdparty/pgmpy/factors/discrete/CPD.py b/pyrca/thirdparty/pgmpy/factors/discrete/CPD.py
index e6ba590..737268e 100755
--- a/pyrca/thirdparty/pgmpy/factors/discrete/CPD.py
+++ b/pyrca/thirdparty/pgmpy/factors/discrete/CPD.py
@@ -127,7 +127,7 @@ def __init__(
         if evidence is None:
             expected_cpd_shape = (variable_card, 1)
         else:
-            expected_cpd_shape = (variable_card, np.product(evidence_card))
+            expected_cpd_shape = (variable_card, np.prod(evidence_card))
         if values.shape != expected_cpd_shape:
             raise ValueError(
                 f"values must be of shape {expected_cpd_shape}. Got shape: {values.shape}"
@@ -577,7 +577,7 @@ def get_random(variable, evidence=None, cardinality=None, state_names=None):
             )
         else:
             parent_card = [cardinality[var] for var in evidence]
-            values = np.random.rand(cardinality[variable], np.product(parent_card))
+            values = np.random.rand(cardinality[variable], np.prod(parent_card))
             values = values / np.sum(values, axis=0)
             node_cpd = TabularCPD(
                 variable=variable,
diff --git a/pyrca/thirdparty/pgmpy/factors/discrete/DiscreteFactor.py b/pyrca/thirdparty/pgmpy/factors/discrete/DiscreteFactor.py
index 01265f9..6b951cf 100755
--- a/pyrca/thirdparty/pgmpy/factors/discrete/DiscreteFactor.py
+++ b/pyrca/thirdparty/pgmpy/factors/discrete/DiscreteFactor.py
@@ -91,8 +91,8 @@ def __init__(self, variables, cardinality, values, state_names={}):
                 "Number of elements in cardinality must be equal to number of variables"
             )

-        if values.size != np.product(cardinality):
-            raise ValueError(f"Values array must be of size: {np.product(cardinality)}")
+        if values.size != np.prod(cardinality):
+            raise ValueError(f"Values array must be of size: {np.prod(cardinality)}")

         if len(set(variables)) != len(variables):
             raise ValueError("Variable names cannot be same")
@@ -866,7 +866,7 @@ def is_valid_cpd(self):
             self.to_factor()
             .marginalize(self.scope()[:1], inplace=False)
             .values.flatten("C"),
-            np.ones(np.product(self.cardinality[:0:-1])),
+            np.ones(np.prod(self.cardinality[:0:-1])),
             atol=0.01,
         )
diff --git a/pyrca/thirdparty/pgmpy/models/DynamicBayesianNetwork.py b/pyrca/thirdparty/pgmpy/models/DynamicBayesianNetwork.py
index c206944..ce01fa9 100755
--- a/pyrca/thirdparty/pgmpy/models/DynamicBayesianNetwork.py
+++ b/pyrca/thirdparty/pgmpy/models/DynamicBayesianNetwork.py
@@ -499,7 +499,7 @@ def check_model(self):
                     cpd.to_factor()
                     .marginalize([node], inplace=False)
                     .values.flatten("C"),
-                    np.ones(np.product(evidence_card)),
+                    np.ones(np.prod(evidence_card)),
                     atol=0.01,
                 ):
                     raise ValueError(
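The pgmpy hunks above are the same kind of mechanical rename: `np.product` was an alias of `np.prod`, deprecated in NumPy 1.25 and removed in NumPy 2.0, so the replacement is drop-in:

```python
import numpy as np

evidence_card = [2, 3, 4]
n_cols = np.prod(evidence_card)           # 24, exactly what np.product returned
values = np.random.rand(2, n_cols)        # CPD table shaped (variable_card, 24)
values = values / np.sum(values, axis=0)  # normalize columns, as in get_random
```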
diff --git a/pyrca/thirdparty/pgmpy/models/MarkovModel.py b/pyrca/thirdparty/pgmpy/models/MarkovModel.py
index 4b21c05..ed58c49 100755
--- a/pyrca/thirdparty/pgmpy/models/MarkovModel.py
+++ b/pyrca/thirdparty/pgmpy/models/MarkovModel.py
@@ -545,7 +545,7 @@ def to_junction_tree(self):
             # To compute clique potential, initially set it as unity factor
             var_card = [self.get_cardinality()[x] for x in node]
             clique_potential = DiscreteFactor(
-                node, var_card, np.ones(np.product(var_card))
+                node, var_card, np.ones(np.prod(var_card))
             )
             # multiply it with the factors associated with the variables present
             # in the clique (or node)
diff --git a/setup.py b/setup.py
index b1d083f..2ea4876 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
     install_requires=[
         "numpy>=1.17",
         "pandas>=1.1.0",
-        "scikit-learn>=0.24,<1.2",
+        "scikit-learn",
         "networkx>=2.6",
         "matplotlib",
         "pyyaml",