X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=foosrank.cpp;h=03edcb5b4c1047572a644aa4122af70e288c32f7;hb=HEAD;hp=45c08cc1479264adf49909c28f210c8683fa31f7;hpb=8ff55698125efe22c05ada9a0bc127dc316f3383;p=foosball

diff --git a/foosrank.cpp b/foosrank.cpp
index 45c08cc..03edcb5 100644
--- a/foosrank.cpp
+++ b/foosrank.cpp
@@ -1,6 +1,6 @@
-#include <stdio.h>
-#include <math.h>
-#include <assert.h>
+#include <cstdio>
+#include <cmath>
+#include <cassert>
 
 #include <vector>
 #include <algorithm>
@@ -8,6 +8,8 @@
 #include <complex>
 #include <fftw3.h>
 
+#define USE_LOGISTIC_DISTRIBUTION 0
+
 // step sizes
 static const double int_step_size = 75.0;
 
@@ -16,12 +18,20 @@ static const double rating_constant = 455.0;
 
 using namespace std;
 
-double prob_score(int k, int a, double rd);
-double prob_score_real(int k, int a, double binomial, double rd_norm);
-double prodai(int k, int a);
-double fac(int x);
+static double prob_score_real(int k, int a, double binomial, double rd_norm);
+static double prodai(int k, int a);
+static double fac(int x);
 
+#if USE_LOGISTIC_DISTRIBUTION
+// sech²(x)
+static double sech2(double x)
+{
+	double e = exp(2.0 * x);
+	return 4.0 * e / ((e+1.0) * (e+1.0));
+}
+#endif
+#if 0
 // probability of match ending k-a (k>a) when winnerR - loserR = RD
 //
 //   +inf
 //     /
@@ -38,14 +48,15 @@ double fac(int x);
 // Glicko/Bradley-Terry assumption that a player rated 400 points over
 // his/her opponent will win with a probability of 10/11 =~ 0.90909.
 //
-double prob_score(int k, int a, double rd)
+static double prob_score(int k, int a, double rd)
 {
 	return prob_score_real(k, a, prodai(k, a) / fac(k-1), rd/rating_constant);
 }
+#endif
 
 // computes x^a, probably more efficiently than pow(x, a) (but requires that a
 // is n unsigned integer)
-double intpow(double x, unsigned a)
+static double intpow(double x, unsigned a)
 {
 	double result = 1.0;
 
@@ -63,7 +74,7 @@ double intpow(double x, unsigned a)
 // Same, but takes in binomial(a+k-1, k-1) as an argument in
 // addition to a. Faster if you already have that precomputed, and assumes rd
 // is already divided by 455.
-double prob_score_real(int k, int a, double binomial, double rd_norm)
+static double prob_score_real(int k, int a, double binomial, double rd_norm)
 {
 	double nom = binomial * intpow(pow(2.0, rd_norm), a);
 	double denom = intpow(1.0 + pow(2.0, rd_norm), k+a);
@@ -71,7 +82,7 @@
 }
 
 // Calculates Product(a+i, i=1..k-1) (see above).
-double prodai(int k, int a)
+static double prodai(int k, int a)
 {
 	double prod = 1.0;
 	for (int i = 1; i < k; ++i)
@@ -79,7 +90,7 @@ double prodai(int k, int a)
 	return prod;
 }
 
-double fac(int x)
+static double fac(int x)
 {
 	double prod = 1.0;
 	for (int i = 2; i <= x; ++i)
@@ -87,11 +98,7 @@ double fac(int x)
 	return prod;
 }
 
-void convolve(int size)
-{
-}
-
-void compute_opponent_rating_pdf(int k, int a, double mu2, double sigma2, double winfac, vector<pair<double, double> > &result)
+static void compute_opponent_rating_pdf(int k, int a, double mu2, double sigma2, double winfac, vector<pair<double, double> > *result)
 {
 	double binomial_precompute = prodai(k, a) / fac(k-1);
 	winfac /= rating_constant;
@@ -99,43 +106,59 @@ void compute_opponent_rating_pdf(int k, int a, double mu2, double sigma2, double
 	int sz = (6000.0 - 0.0) / int_step_size;
 	double h = (6000.0 - 0.0) / sz;
 
-	fftw_plan f1, f2, b;
-	complex<double> *func1, *func2, *res;
-
-	func1 = reinterpret_cast<complex<double> *>(fftw_malloc(sz*2*sizeof(complex<double>)));
-	func2 = reinterpret_cast<complex<double> *>(fftw_malloc(sz*2*sizeof(complex<double>)));
-	res = reinterpret_cast<complex<double> *>(fftw_malloc(sz*2*sizeof(complex<double>)));
-	f1 = fftw_plan_dft_1d(sz*2,
-		reinterpret_cast<fftw_complex *>(func1),
-		reinterpret_cast<fftw_complex *>(func1),
-		FFTW_FORWARD,
-		FFTW_MEASURE);
-	f2 = fftw_plan_dft_1d(sz*2,
-		reinterpret_cast<fftw_complex *>(func2),
-		reinterpret_cast<fftw_complex *>(func2),
-		FFTW_FORWARD,
-		FFTW_MEASURE);
-	b = fftw_plan_dft_1d(sz*2,
-		reinterpret_cast<fftw_complex *>(res),
-		reinterpret_cast<fftw_complex *>(res),
-		FFTW_BACKWARD,
-		FFTW_MEASURE);
+	static bool inited = false;
+	static fftw_plan f1, f2, b;
+	static complex<double> *func1, *func2, *res;
+
+	if (!inited) {
+		func1 = reinterpret_cast<complex<double> *>(fftw_malloc(sz*2*sizeof(complex<double>)));
+		func2 = reinterpret_cast<complex<double> *>(fftw_malloc(sz*2*sizeof(complex<double>)));
+		res = reinterpret_cast<complex<double> *>(fftw_malloc(sz*2*sizeof(complex<double>)));
+		f1 = fftw_plan_dft_1d(sz*2,
+			reinterpret_cast<fftw_complex *>(func1),
+			reinterpret_cast<fftw_complex *>(func1),
+			FFTW_FORWARD,
+			FFTW_MEASURE);
+		f2 = fftw_plan_dft_1d(sz*2,
+			reinterpret_cast<fftw_complex *>(func2),
+			reinterpret_cast<fftw_complex *>(func2),
+			FFTW_FORWARD,
+			FFTW_MEASURE);
+		b = fftw_plan_dft_1d(sz*2,
+			reinterpret_cast<fftw_complex *>(res),
+			reinterpret_cast<fftw_complex *>(res),
+			FFTW_BACKWARD,
+			FFTW_MEASURE);
+		inited = true;
+	}
 
 	// start off by zero
 	for (int i = 0; i < sz*2; ++i) {
 		func1[i].real() = func1[i].imag() = func2[i].real() = func2[i].imag() = 0.0;
 	}
 
+#if USE_LOGISTIC_DISTRIBUTION
+	double invsigma2 = 1.0 / sigma2;
+#else
+	double invsq2sigma2 = 1.0 / (sqrt(2.0) * sigma2);
+#endif
 	for (int i = 0; i < sz; ++i) {
 		double x1 = 0.0 + h*i;
-		double z = (x1 - mu2)/sigma2;
-		func1[i].real() = exp(-(z*z/2.0));
+
+		// opponent's pdf
+#if USE_LOGISTIC_DISTRIBUTION
+		double z = (x1 - mu2) * invsigma2;
+		func1[i].real() = sech2(0.5 * z);
+#else
+		double z = (x1 - mu2) * invsq2sigma2;
+		func1[i].real() = exp(-z*z);
+#endif
 		double x2 = -3000.0 + h*i;
 		func2[(i - sz/2 + sz*2)%(sz*2)].real() = prob_score_real(k, a, binomial_precompute, x2*winfac);
 	}
 
-	result.reserve(sz*2);
+	result->reserve(sz*2);
 
 	// convolve
 	fftw_execute(f1);
@@ -144,47 +167,32 @@ void compute_opponent_rating_pdf(int k, int a, double mu2, double sigma2, double
 		res[i] = func1[i] * func2[i];
 	}
 	fftw_execute(b);
+
+	result->reserve(sz);
 	for (int i = 0; i < sz; ++i) {
 		double r1 = i*h;
-		result.push_back(make_pair(r1, abs(res[i])));
+		result->push_back(make_pair(r1, abs(res[i])));
 	}
 }
 
 // normalize the curve so we know that A ~= 1
-void normalize(vector<pair<double, double> > &curve)
+static void normalize(vector<pair<double, double> > *curve)
 {
 	double peak = 0.0;
-	for (vector<pair<double, double> >::const_iterator i = curve.begin(); i != curve.end(); ++i) {
+	for (vector<pair<double, double> >::const_iterator i = curve->begin(); i != curve->end(); ++i) {
 		peak = max(peak, i->second);
 	}
 
 	double invpeak = 1.0 / peak;
-	for (vector<pair<double, double> >::iterator i = curve.begin(); i != curve.end(); ++i) {
+	for (vector<pair<double, double> >::iterator i = curve->begin(); i != curve->end(); ++i) {
 		i->second *= invpeak;
 	}
 }
 
-// computes matA * matB
-void mat_mul(double *matA, unsigned ah, unsigned aw,
-             double *matB, unsigned bh, unsigned bw,
-             double *result)
-{
-	assert(aw == bh);
-	for (unsigned y = 0; y < bw; ++y) {
-		for (unsigned x = 0; x < ah; ++x) {
-			double sum = 0.0;
-			for (unsigned c = 0; c < aw; ++c) {
-				sum += matA[c*ah + x] * matB[y*bh + c];
-			}
-			result[y*bw + x] = sum;
-		}
-	}
-}
-
 // computes matA^T * matB
-void mat_mul_trans(double *matA, unsigned ah, unsigned aw,
-                   double *matB, unsigned bh, unsigned bw,
-                   double *result)
+static void mat_mul_trans(double *matA, unsigned ah, unsigned aw,
+                          double *matB, unsigned bh, unsigned bw,
+                          double *result)
 {
 	assert(ah == bh);
 	for (unsigned y = 0; y < bw; ++y) {
@@ -198,89 +206,46 @@ void mat_mul_trans(double *matA, unsigned ah, unsigned aw,
 		}
 	}
 }
 
-void print3x3(double *M)
-{
-	printf("%f %f %f\n", M[0], M[3], M[6]);
-	printf("%f %f %f\n", M[1], M[4], M[7]);
-	printf("%f %f %f\n", M[2], M[5], M[8]);
-}
-
-void print3x1(double *M)
-{
-	printf("%f\n", M[0]);
-	printf("%f\n", M[1]);
-	printf("%f\n", M[2]);
-}
-
-// solves Ax = B by Gauss-Jordan elimination, where A is a 3x3 matrix,
-// x is a column vector of length 3 and B is a row vector of length 3.
+// solves Ax = B by Gauss-Jordan elimination, where A is an NxN matrix,
+// x is a column vector of length N and B is a row vector of length N.
 // Destroys its input in the process.
-void solve3x3(double *A, double *x, double *B)
+template<int N>
+static void solve_matrix(double *A, double *x, double *B)
 {
-	// row 1 -= row 0 * (a1/a0)
-	{
-		double f = A[1] / A[0];
-		A[1] = 0.0;
-		A[4] -= A[3] * f;
-		A[7] -= A[6] * f;
-
-		B[1] -= B[0] * f;
-	}
-
-	// row 2 -= row 0 * (a2/a0)
-	{
-		double f = A[2] / A[0];
-		A[2] = 0.0;
-		A[5] -= A[3] * f;
-		A[8] -= A[6] * f;
-
-		B[2] -= B[0] * f;
-	}
-
-	// row 2 -= row 1 * (a5/a4)
-	{
-		double f = A[5] / A[4];
-		A[5] = 0.0;
-		A[8] -= A[7] * f;
-
-		B[2] -= B[1] * f;
-	}
-
-	// back substitute:
-
-	// row 1 -= row 2 * (a7/a8)
-	{
-		double f = A[7] / A[8];
-		A[7] = 0.0;
-
-		B[1] -= B[2] * f;
-	}
-
-	// row 0 -= row 2 * (a6/a8)
-	{
-		double f = A[6] / A[8];
-		A[6] = 0.0;
+	for (int i = 0; i < N; ++i) {
+		for (int j = i+1; j < N; ++j) {
+			// row j -= row i * (a[i,j] / a[i,i])
+			double f = A[j+i*N] / A[i+i*N];
+
+			A[j+i*N] = 0.0;
+			for (int k = i+1; k < N; ++k) {
+				A[j+k*N] -= A[i+k*N] * f;
+			}
 
-		B[0] -= B[2] * f;
+			B[j] -= B[i] * f;
+		}
 	}
 
-	// row 0 -= row 1 * (a3/a4)
-	{
-		double f = A[3] / A[4];
-		A[3] = 0.0;
-
-		B[0] -= B[1] * f;
+	// back-substitute
+	for (int i = N; i --> 0; ) {
+		for (int j = i; j --> 0; ) {
+			// row j -= row i * (a[j,j] / a[j,i])
+			double f = A[i+j*N] / A[j+j*N];
+
+			// A[j+i*N] = 0.0;
+			B[j] -= B[i] * f;
+		}
 	}
 
 	// normalize
-	x[0] = B[0] / A[0];
-	x[1] = B[1] / A[4];
-	x[2] = B[2] / A[8];
+	for (int i = 0; i < N; ++i) {
+		x[i] = B[i] / A[i+i*N];
+	}
 }
 
 // Give an OK starting estimate for the least squares, by numerical integration
 // of statistical moments.
-void estimate_musigma(vector<pair<double, double> > &curve, double &mu_result, double &sigma_result)
+static void estimate_musigma(const vector<pair<double, double> > &curve, double *mu_result, double *sigma_result)
 {
 	double h = (curve.back().first - curve.front().first) / (curve.size() - 1);
 
@@ -311,8 +276,8 @@ void estimate_musigma(vector<pair<double, double> > &curve, double &mu_result, d
 	ex = (h/3.0) * ex / area;
 	ex2 = (h/3.0) * ex2 / area;
 
-	mu_result = ex;
-	sigma_result = sqrt(ex2 - ex * ex);
+	*mu_result = ex;
+	*sigma_result = sqrt(ex2 - ex * ex);
 }
 
 // Find best fit of the data in curves to a Gaussian pdf, based on the
@@ -322,7 +287,7 @@ void estimate_musigma(vector<pair<double, double> > &curve, double &mu_result, d
 // Note that the algorithm blows up quite hard if the initial estimate is
 // not good enough. Use estimate_musigma to get a reasonable starting
 // estimate.
-void least_squares(vector<pair<double, double> > &curve, double mu1, double sigma1, double &mu_result, double &sigma_result)
+static void least_squares(const vector<pair<double, double> > &curve, double mu1, double sigma1, double *mu_result, double *sigma_result)
 {
 	double A = 1.0;
 	double mu = mu1;
@@ -347,17 +312,30 @@ void least_squares(vector<pair<double, double> > &curve, double mu1, double sigm
 	for (unsigned i = 0; i < curve.size(); ++i) {
 		double x = curve[i].first;
 
+#if USE_LOGISTIC_DISTRIBUTION
+		// df/dA(x_i)
+		matA[i + 0 * curve.size()] = sech2(0.5 * (x-mu)/sigma);
+
+		// df/dµ(x_i)
+		matA[i + 1 * curve.size()] = A * matA[i + 0 * curve.size()]
+			* tanh(0.5 * (x-mu)/sigma) / sigma;
+
+		// df/dσ(x_i)
+		matA[i + 2 * curve.size()] =
+			matA[i + 1 * curve.size()] * (x-mu)/sigma;
+#else
 		// df/dA(x_i)
 		matA[i + 0 * curve.size()] = exp(-(x-mu)*(x-mu)/(2.0*sigma*sigma));
 
 		// df/dµ(x_i)
-		matA[i + 1 * curve.size()] = 
+		matA[i + 1 * curve.size()] =
 			A * (x-mu)/(sigma*sigma) * matA[i + 0 * curve.size()];
 
 		// df/dσ(x_i)
 		matA[i + 2 * curve.size()] =
 			matA[i + 1 * curve.size()] * (x-mu)/sigma;
+#endif
 	}
 
 	// find dβ
@@ -365,7 +343,11 @@ void least_squares(vector<pair<double, double> > &curve, double mu1, double sigm
 		double x = curve[i].first;
 		double y = curve[i].second;
 
+#if USE_LOGISTIC_DISTRIBUTION
+		dbeta[i] = y - A * sech2(0.5 * (x-mu)/sigma);
+#else
 		dbeta[i] = y - A * exp(- (x-mu)*(x-mu)/(2.0*sigma*sigma));
+#endif
 	}
 
 	// compute a and b
@@ -373,7 +355,7 @@ void least_squares(vector<pair<double, double> > &curve, double mu1, double sigm
 	mat_mul_trans(matA, curve.size(), 3, dbeta, curve.size(), 1, matATdb);
 
 	// solve
-	solve3x3(matATA, dlambda, matATdb);
+	solve_matrix<3>(matATA, dlambda, matATdb);
 
 	A += dlambda[0];
 	mu += dlambda[1];
@@ -384,46 +366,85 @@ void least_squares(vector<pair<double, double> > &curve, double mu1, double sigm
 			break;
 	}
 
-	mu_result = mu;
-	sigma_result = sigma;
+	*mu_result = mu;
+	*sigma_result = sigma;
 }
 
-void compute_new_rating(double mu1, double sigma1, double mu2, double sigma2, int score1, int score2, double &mu, double &sigma)
+void compute_new_rating(double mu1, double sigma1, double mu2, double sigma2, int score1, int score2, double *mu, double *sigma, double *probability)
 {
 	vector<pair<double, double> > curve;
 
 	if (score1 > score2) {
-		compute_opponent_rating_pdf(score1, score2, mu2, sigma2, -1.0, curve);
+		compute_opponent_rating_pdf(score1, score2, mu2, sigma2, -1.0, &curve);
 	} else {
-		compute_opponent_rating_pdf(score2, score1, mu2, sigma2, 1.0, curve);
+		compute_opponent_rating_pdf(score2, score1, mu2, sigma2, 1.0, &curve);
 	}
 
 	// multiply in the gaussian
 	for (unsigned i = 0; i < curve.size(); ++i) {
 		double r1 = curve[i].first;
+
+		// my pdf
 		double z = (r1 - mu1) / sigma1;
+#if USE_LOGISTIC_DISTRIBUTION
+		curve[i].second *= sech2(0.5 * z);
+#else
 		double gaussian = exp(-(z*z/2.0));
 		curve[i].second *= gaussian;
+#endif
+	}
+
+	// Compute the overall probability of the given result, by integrating
+	// the entire resulting pdf. Note that since we're actually evaluating
+	// a double integral, we'll need to multiply by h² instead of h.
+	{
+		double h = (curve.back().first - curve.front().first) / (curve.size() - 1);
+		double sum = curve.front().second;
+		for (unsigned i = 1; i < curve.size() - 1; i += 2) {
+			sum += 4.0 * curve[i].second;
+		}
+		for (unsigned i = 2; i < curve.size() - 1; i += 2) {
+			sum += 2.0 * curve[i].second;
+		}
+		sum += curve.back().second;
+		sum *= h * h / 3.0;
+
+		// FFT convolution multiplication factor (FFTW computes unnormalized
+		// transforms)
+		sum /= (curve.size() * 2);
+
+		// pdf normalization factors
+#if USE_LOGISTIC_DISTRIBUTION
+		sum /= (sigma1 * 4.0);
+		sum /= (sigma2 * 4.0);
+#else
+		sum /= (sigma1 * sqrt(2.0 * M_PI));
+		sum /= (sigma2 * sqrt(2.0 * M_PI));
+#endif
+
+		*probability = sum;
+	}
 
 	double mu_est, sigma_est;
-	normalize(curve);
-	estimate_musigma(curve, mu_est, sigma_est);
+	normalize(&curve);
+	estimate_musigma(curve, &mu_est, &sigma_est);
 	least_squares(curve, mu_est, sigma_est, mu, sigma);
 }
 
-void compute_new_double_rating(double mu1, double sigma1, double mu2, double sigma2, double mu3, double sigma3, double mu4, double sigma4, int score1, int score2, double &mu, double &sigma)
+static void compute_new_double_rating(double mu1, double sigma1, double mu2, double sigma2, double mu3, double sigma3, double mu4, double sigma4, int score1, int score2, double *mu, double *sigma, double *probability)
 {
 	vector<pair<double, double> > curve, newcurve;
 	double mu_t = mu3 + mu4;
 	double sigma_t = sqrt(sigma3*sigma3 + sigma4*sigma4);
 
 	if (score1 > score2) {
-		compute_opponent_rating_pdf(score1, score2, mu_t, sigma_t, -1.0, curve);
+		compute_opponent_rating_pdf(score1, score2, mu_t, sigma_t, -1.0, &curve);
 	} else {
-		compute_opponent_rating_pdf(score2, score1, mu_t, sigma_t, 1.0, curve);
+		compute_opponent_rating_pdf(score2, score1, mu_t, sigma_t, 1.0, &curve);
 	}
 
+	newcurve.reserve(curve.size());
+
 	// iterate over r1
 	double h = 3000.0 / curve.size();
 	for (unsigned i = 0; i < curve.size(); ++i) {
@@ -434,24 +455,74 @@ void compute_new_double_rating(double mu1, double sigma1, double mu2, double sig
 		double r1 = i * h;
 
 		// iterate over r2
+#if USE_LOGISTIC_DISTRIBUTION
+		double invsigma2 = 1.0 / sigma2;
+#else
+		double invsq2sigma2 = 1.0 / (sqrt(2.0) * sigma2);
+#endif
 		for (unsigned j = 0; j < curve.size(); ++j) {
 			double r1plusr2 = curve[j].first;
 			double r2 = r1plusr2 - r1;
-			double z = (r2 - mu2) / sigma2;
-			double gaussian = exp(-(z*z/2.0));
+#if USE_LOGISTIC_DISTRIBUTION
+			double z = (r2 - mu2) * invsigma2;
+			double gaussian = sech2(0.5 * z);
+#else
+			double z = (r2 - mu2) * invsq2sigma2;
+			double gaussian = exp(-z*z);
+#endif
 			sum += curve[j].second * gaussian;
 		}
 
+#if USE_LOGISTIC_DISTRIBUTION
+		double z = (r1 - mu1) / sigma1;
+		double gaussian = sech2(0.5 * z);
+#else
 		double z = (r1 - mu1) / sigma1;
 		double gaussian = exp(-(z*z/2.0));
+#endif
 		newcurve.push_back(make_pair(r1, gaussian * sum));
 	}
 
+	// Compute the overall probability of the given result, by integrating
+	// the entire resulting pdf. Note that since we're actually evaluating
+	// a triple integral, we'll need to multiply by 4h³ (no idea where the
+	// 4 factor comes from, probably from the 0..6000 range somehow) instead
+	// of h.
+	{
+		double h = (newcurve.back().first - newcurve.front().first) / (newcurve.size() - 1);
+		double sum = newcurve.front().second;
+		for (unsigned i = 1; i < newcurve.size() - 1; i += 2) {
+			sum += 4.0 * newcurve[i].second;
+		}
+		for (unsigned i = 2; i < newcurve.size() - 1; i += 2) {
+			sum += 2.0 * newcurve[i].second;
+		}
+		sum += newcurve.back().second;
+
+		sum *= 4.0 * h * h * h / 3.0;
+
+		// FFT convolution multiplication factor (FFTW computes unnormalized
+		// transforms)
+		sum /= (newcurve.size() * 2);
+
+		// pdf normalization factors
+#if USE_LOGISTIC_DISTRIBUTION
+		sum /= (sigma1 * 4.0);
+		sum /= (sigma2 * 4.0);
+		sum /= (sigma_t * 4.0);
+#else
+		sum /= (sigma1 * sqrt(2.0 * M_PI));
+		sum /= (sigma2 * sqrt(2.0 * M_PI));
+		sum /= (sigma_t * sqrt(2.0 * M_PI));
+#endif
+
+		*probability = sum;
+	}
 
 	double mu_est, sigma_est;
-	normalize(newcurve);
-	estimate_musigma(newcurve, mu_est, sigma_est);
+	normalize(&newcurve);
+	estimate_musigma(newcurve, &mu_est, &sigma_est);
 	least_squares(newcurve, mu_est, sigma_est, mu, sigma);
 }
 
@@ -475,62 +546,65 @@ int main(int argc, char **argv)
 		double sigma4 = atof(argv[8]);
 		int score1 = atoi(argv[9]);
 		int score2 = atoi(argv[10]);
-		double mu, sigma;
-		compute_new_double_rating(mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, score1, score2, mu, sigma);
-		printf("%f %f\n", mu, sigma);
+		double mu, sigma, probability;
+		compute_new_double_rating(mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, score1, score2, &mu, &sigma, &probability);
+		printf("%f %f %f\n", mu, sigma, probability);
 	} else if (argc > 8) {
 		double mu3 = atof(argv[5]);
 		double sigma3 = atof(argv[6]);
 		double mu4 = atof(argv[7]);
 		double sigma4 = atof(argv[8]);
 		int k = atoi(argv[9]);
-		
+
 		// assess all possible scores
 		for (int i = 0; i < k; ++i) {
 			double newmu1_1, newmu1_2, newmu2_1, newmu2_2;
 			double newsigma1_1, newsigma1_2, newsigma2_1, newsigma2_2;
-			compute_new_double_rating(mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, k, i, newmu1_1, newsigma1_1);
-			compute_new_double_rating(mu2, sigma2, mu1, sigma1, mu3, sigma3, mu4, sigma4, k, i, newmu1_2, newsigma1_2);
-			compute_new_double_rating(mu3, sigma3, mu4, sigma4, mu1, sigma1, mu2, sigma2, i, k, newmu2_1, newsigma2_1);
-			compute_new_double_rating(mu4, sigma4, mu3, sigma3, mu1, sigma1, mu2, sigma2, i, k, newmu2_2, newsigma2_2);
+			double probability;
+			compute_new_double_rating(mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, k, i, &newmu1_1, &newsigma1_1, &probability);
+			compute_new_double_rating(mu2, sigma2, mu1, sigma1, mu3, sigma3, mu4, sigma4, k, i, &newmu1_2, &newsigma1_2, &probability);
+			compute_new_double_rating(mu3, sigma3, mu4, sigma4, mu1, sigma1, mu2, sigma2, i, k, &newmu2_1, &newsigma2_1, &probability);
+			compute_new_double_rating(mu4, sigma4, mu3, sigma3, mu1, sigma1, mu2, sigma2, i, k, &newmu2_2, &newsigma2_2, &probability);
 			printf("%u-%u,%f,%+f,%+f,%+f,%+f\n",
-				k, i, prob_score(k, i, mu3+mu4-(mu1+mu2)), newmu1_1-mu1, newmu1_2-mu2,
+				k, i, probability, newmu1_1-mu1, newmu1_2-mu2,
 				newmu2_1-mu3, newmu2_2-mu4);
 		}
 		for (int i = k; i --> 0; ) {
 			double newmu1_1, newmu1_2, newmu2_1, newmu2_2;
 			double newsigma1_1, newsigma1_2, newsigma2_1, newsigma2_2;
-			compute_new_double_rating(mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, i, k, newmu1_1, newsigma1_1);
-			compute_new_double_rating(mu2, sigma2, mu1, sigma1, mu3, sigma3, mu4, sigma4, i, k, newmu1_2, newsigma1_2);
-			compute_new_double_rating(mu3, sigma3, mu4, sigma4, mu1, sigma1, mu2, sigma2, k, i, newmu2_1, newsigma2_1);
-			compute_new_double_rating(mu4, sigma4, mu3, sigma3, mu1, sigma1, mu2, sigma2, k, i, newmu2_2, newsigma2_2);
+			double probability;
+			compute_new_double_rating(mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, i, k, &newmu1_1, &newsigma1_1, &probability);
+			compute_new_double_rating(mu2, sigma2, mu1, sigma1, mu3, sigma3, mu4, sigma4, i, k, &newmu1_2, &newsigma1_2, &probability);
+			compute_new_double_rating(mu3, sigma3, mu4, sigma4, mu1, sigma1, mu2, sigma2, k, i, &newmu2_1, &newsigma2_1, &probability);
+			compute_new_double_rating(mu4, sigma4, mu3, sigma3, mu1, sigma1, mu2, sigma2, k, i, &newmu2_2, &newsigma2_2, &probability);
 			printf("%u-%u,%f,%+f,%+f,%+f,%+f\n",
-				i, k, prob_score(k, i, mu1+mu2-(mu3+mu4)), newmu1_1-mu1, newmu1_2-mu2,
+				i, k, probability, newmu1_1-mu1, newmu1_2-mu2,
 				newmu2_1-mu3, newmu2_2-mu4);
 		}
 	} else if (argc > 6) {
 		int score1 = atoi(argv[5]);
 		int score2 = atoi(argv[6]);
-		double mu, sigma;
-		compute_new_rating(mu1, sigma1, mu2, sigma2, score1, score2, mu, sigma);
-		printf("%f %f\n", mu, sigma);
+		double mu, sigma, probability;
+		compute_new_rating(mu1, sigma1, mu2, sigma2, score1, score2, &mu, &sigma, &probability);
+
+		printf("%f %f %f\n", mu, sigma, probability);
 	} else {
 		int k = atoi(argv[5]);
 
 		// assess all possible scores
 		for (int i = 0; i < k; ++i) {
-			double newmu1, newmu2, newsigma1, newsigma2;
-			compute_new_rating(mu1, sigma1, mu2, sigma2, k, i, newmu1, newsigma1);
-			compute_new_rating(mu2, sigma2, mu1, sigma1, i, k, newmu2, newsigma2);
+			double newmu1, newmu2, newsigma1, newsigma2, probability;
+			compute_new_rating(mu1, sigma1, mu2, sigma2, k, i, &newmu1, &newsigma1, &probability);
+			compute_new_rating(mu2, sigma2, mu1, sigma1, i, k, &newmu2, &newsigma2, &probability);
 			printf("%u-%u,%f,%+f,%+f\n",
-				k, i, prob_score(k, i, mu2-mu1), newmu1-mu1, newmu2-mu2);
+				k, i, probability, newmu1-mu1, newmu2-mu2);
 		}
 		for (int i = k; i --> 0; ) {
-			double newmu1, newmu2, newsigma1, newsigma2;
-			compute_new_rating(mu1, sigma1, mu2, sigma2, i, k, newmu1, newsigma1);
-			compute_new_rating(mu2, sigma2, mu1, sigma1, k, i, newmu2, newsigma2);
+			double newmu1, newmu2, newsigma1, newsigma2, probability;
+			compute_new_rating(mu1, sigma1, mu2, sigma2, i, k, &newmu1, &newsigma1, &probability);
+			compute_new_rating(mu2, sigma2, mu1, sigma1, k, i, &newmu2, &newsigma2, &probability);
 			printf("%u-%u,%f,%+f,%+f\n",
-				i, k, prob_score(k, i, mu1-mu2), newmu1-mu1, newmu2-mu2);
+				i, k, probability, newmu1-mu1, newmu2-mu2);
 		}
 	}