 modexpng_fpga_model.py | 113
 1 file changed, 64 insertions(+), 49 deletions(-)
diff --git a/modexpng_fpga_model.py b/modexpng_fpga_model.py
index b1628e3..54db95f 100644
--- a/modexpng_fpga_model.py
+++ b/modexpng_fpga_model.py
@@ -94,7 +94,6 @@ class ModExpNG_Operand():
         self._init_from_words(words, length)
-
     def _init_from_words(self, words, count):
         for i in range(count):
@@ -628,80 +627,96 @@ class ModExpNG_Worker():
if __name__ == "__main__":
# load test vector
- vector = ModExpNG_TestVector()
-
# create worker
+ # set numbers of words
+ # obtain known good reference value with built-in math
+ # create helper quantity
+ # mutate blinding quantities with built-in math
+
+ vector = ModExpNG_TestVector()
worker = ModExpNG_Worker()
- # numbers of words
- n_num_words = KEY_LENGTH // _WORD_WIDTH
+ n_num_words = KEY_LENGTH // _WORD_WIDTH
pq_num_words = n_num_words // 2
- # obtain known good reference value with built-in math
s_known = pow(vector.m.number(), vector.d.number(), vector.n.number())
- # one
- i = ModExpNG_Operand(1, _KEY_LENGTH_HALF)
+ i = ModExpNG_Operand(1, KEY_LENGTH)
- # bring one into Montgomery domain (glue 2**r to one)
- ip_factor = worker.multiply(i, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
- iq_factor = worker.multiply(i, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+ x_mutated_known = pow(vector.x.number(), 2, vector.n.number())
+ y_mutated_known = pow(vector.y.number(), 2, vector.n.number())
+ # bring one into Montgomery domain (glue 2**r to one)
# bring blinding coefficients into Montgomery domain (glue 2**(2*r) to x and y)
- x_factor = worker.multiply(vector.x, vector.n_factor, vector.n, vector.n_coeff, n_num_words)
- y_factor = worker.multiply(vector.y, vector.n_factor, vector.n, vector.n_coeff, n_num_words)
-
# blind message
- m_blind = worker.multiply(vector.m, y_factor, vector.n, vector.n_coeff, n_num_words)
+ # convert message to non-redundant representation
+ # first reduce message, this glues 2**-r to the message as a side effect
+ # unglue 2**-r from message by gluing 2**r to it to compensate
+ # bring message into Montgomery domain (glue 2**r to message)
+ # do "easier" exponentiations
+ # return "easier" parts from Montgomery domain (unglue 2**r from result)
+ # do the "Garner's formula" part
+ # r = sp - sq mod p
+ # sr_qinv = sr * qinv mod p
+ # q_sr_qinv = q * sr_qinv
+ # s_crt = sq + q_sr_qinv
+ # unblind s
+ # mutate blinding factors
+ ip_factor = worker.multiply(i, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ iq_factor = worker.multiply(i, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+
+ x_factor = worker.multiply(vector.x, vector.n_factor, vector.n, vector.n_coeff, n_num_words)
+ y_factor = worker.multiply(vector.y, vector.n_factor, vector.n, vector.n_coeff, n_num_words)
+
+ m_blind = worker.multiply(vector.m, y_factor, vector.n, vector.n_coeff, n_num_words)
- # have to convert to non-redundant representation here
worker.reduce(m_blind)
- # first reduce message, this glues 2**-r to the message as a side effect
- mp_blind_inverse_factor = worker.multiply(m_blind, None, vector.p, vector.p_coeff, pq_num_words, reduce_only=True)
- mq_blind_inverse_factor = worker.multiply(m_blind, None, vector.q, vector.q_coeff, pq_num_words, reduce_only=True)
+ mp_blind_inverse_factor = worker.multiply(m_blind, None, vector.p, vector.p_coeff, pq_num_words, reduce_only=True)
+ mq_blind_inverse_factor = worker.multiply(m_blind, None, vector.q, vector.q_coeff, pq_num_words, reduce_only=True)
- # unglue 2**-r from message by gluing 2**r to it to compensate
- mp_blind = worker.multiply(mp_blind_inverse_factor, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
- mq_blind = worker.multiply(mq_blind_inverse_factor, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+ mp_blind = worker.multiply(mp_blind_inverse_factor, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ mq_blind = worker.multiply(mq_blind_inverse_factor, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
- # bring message into Montgomery domain (glue 2**r to message)
- mp_blind_factor = worker.multiply(mp_blind, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
- mq_blind_factor = worker.multiply(mq_blind, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+ mp_blind_factor = worker.multiply(mp_blind, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ mq_blind_factor = worker.multiply(mq_blind, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
- # do "easier" exponentiations
- sp_blind_factor = worker.exponentiate(ip_factor, mp_blind_factor, vector.dp, vector.p, vector.p_factor, vector.p_coeff, pq_num_words)
- sq_blind_factor = worker.exponentiate(iq_factor, mq_blind_factor, vector.dq, vector.q, vector.q_factor, vector.q_coeff, pq_num_words)
+ sp_blind_factor = worker.exponentiate(ip_factor, mp_blind_factor, vector.dp, vector.p, vector.p_factor, vector.p_coeff, pq_num_words)
+ sq_blind_factor = worker.exponentiate(iq_factor, mq_blind_factor, vector.dq, vector.q, vector.q_factor, vector.q_coeff, pq_num_words)
- # return "easier" parts from Montgomery domain (unglue 2**r from result)
- sp_blind = worker.multiply(i, sp_blind_factor, vector.p, vector.p_coeff, pq_num_words)
- sq_blind = worker.multiply(i, sq_blind_factor, vector.q, vector.q_coeff, pq_num_words)
+ sp_blind = worker.multiply(i, sp_blind_factor, vector.p, vector.p_coeff, pq_num_words)
+ sq_blind = worker.multiply(i, sq_blind_factor, vector.q, vector.q_coeff, pq_num_words)
- #
- # do the "Garner's formula" part
- #
+ sr_blind = worker.subtract(sp_blind, sq_blind, vector.p, pq_num_words)
- # 1. r = sp - sq mod p
- sr_blind = worker.subtract(sp_blind, sq_blind, vector.p, pq_num_words)
+ sr_qinv_blind_inverse_factor = worker.multiply(sr_blind, vector.qinv, vector.p, vector.p_coeff, pq_num_words)
+ sr_qinv_blind = worker.multiply(sr_qinv_blind_inverse_factor, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ q_sr_qinv_blind = worker.multiply(vector.q, sr_qinv_blind, None, None, pq_num_words, multiply_only=True)
- # 2. sr_qinv = sr * qinv mod p
- sr_qinv_blind_inverse_factor = worker.multiply(sr_blind, vector.qinv, vector.p, vector.p_coeff, pq_num_words)
- sr_qinv_blind = worker.multiply(sr_qinv_blind_inverse_factor, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ s_crt_blinded = worker.add(sq_blind, q_sr_qinv_blind, pq_num_words)
- # 3. q_sr_qinv = q * sr_qinv
- q_sr_qinv_blind = worker.multiply(vector.q, sr_qinv_blind, None, None, pq_num_words, multiply_only=True)
+ s_crt_unblinded = worker.multiply(s_crt_blinded, x_factor, vector.n, vector.n_coeff, n_num_words)
- # 4. s_crt = sq + q_sr_qinv
- s_crt_blinded = worker.add(sq_blind, q_sr_qinv_blind, pq_num_words)
+ x_mutated_factor = worker.multiply(x_factor, x_factor, vector.n, vector.n_coeff, n_num_words)
+ y_mutated_factor = worker.multiply(y_factor, y_factor, vector.n, vector.n_coeff, n_num_words)
- # unblind s
- s_crt_unblinded = worker.multiply(s_crt_blinded, x_factor, vector.n, vector.n_coeff, n_num_words)
+ x_mutated = worker.multiply(i, x_mutated_factor, vector.n, vector.n_coeff, n_num_words)
+ y_mutated = worker.multiply(i, y_mutated_factor, vector.n, vector.n_coeff, n_num_words)
+
+ worker.reduce(s_crt_unblinded)
+ worker.reduce(x_mutated)
+ worker.reduce(y_mutated)
# check
- if s_crt_unblinded.number() != s_known:
- print("ERROR: s_crt_unblinded != s_known!")
- else:
- print("s is OK")
+ if s_crt_unblinded.number() != s_known: print("ERROR: s_crt_unblinded != s_known!")
+ else: print("s is OK")
+
+ if x_mutated.number() != x_mutated_known: print("ERROR: x_mutated != x_mutated_known!")
+ else: print("x_mutated is OK")
+
+ if y_mutated.number() != y_mutated_known: print("ERROR: y_mutated != y_mutated_known!")
+ else: print("y_mutated is OK")
+
#
# End-of-File
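
The reorganized test flow above exercises three things at once: Montgomery-domain bookkeeping (the "glue/unglue 2**r" comments), blinded CRT exponentiation with Garner's recombination, and mutation of the blinding pair by squaring. The following plain-integer sketch reproduces that flow with ordinary Python arithmetic instead of the ModExpNG_Worker/ModExpNG_Operand API; the toy key, the blinding base r, and the helper names are made up for illustration and are not taken from the test vector.

    # Hedged sketch: blinded CRT signing with Garner's formula, plus the
    # "square the blinding pair" mutation checked by the testbench above.
    # Toy parameters only -- far too small for real use. Requires Python 3.8+.
    from math import gcd

    p, q = 61, 53
    n = p * q
    e = 17
    d = pow(e, -1, (p - 1) * (q - 1))
    dp, dq = d % (p - 1), d % (q - 1)
    qinv = pow(q, -1, p)

    m = 42                     # message
    r = 19                     # blinding base, must be coprime to n
    assert gcd(r, n) == 1
    y = pow(r, e, n)           # blinding factor (applied before exponentiation)
    x = pow(r, -1, n)          # unblinding factor (applied after exponentiation)

    s_known = pow(m, d, n)     # known good reference value

    def blinded_crt_sign(msg, x_blind, y_blind):
        m_blind = (msg * y_blind) % n       # blind message
        sp = pow(m_blind % p, dp, p)        # "easier" exponentiation mod p
        sq = pow(m_blind % q, dq, q)        # "easier" exponentiation mod q
        h = ((sp - sq) * qinv) % p          # Garner: (sp - sq) * qinv mod p
        s_blind = sq + q * h                # s_crt = sq + q * h
        return (s_blind * x_blind) % n      # unblind s

    assert blinded_crt_sign(m, x, y) == s_known

    # mutate blinding factors: squaring both preserves x == y**(-d) mod n,
    # so the mutated pair is still a valid blinding/unblinding pair
    x2, y2 = pow(x, 2, n), pow(y, 2, n)
    assert blinded_crt_sign(m, x2, y2) == s_known
    print("blinded CRT sketch OK")

The "glue 2**r" comments describe Montgomery-form bookkeeping: a Montgomery multiply returns a*b*2**(-r) mod n, so multiplying by a precomputed 2**(2*r) mod n constant (the *_factor values) enters the domain, and multiplying by one leaves it. A minimal illustration, again with made-up numbers and a hypothetical radix rather than the model's word-level reduction:

    # Hedged sketch of the Montgomery-domain bookkeeping, not the worker's
    # word-serial implementation. R plays the role of 2**r.
    n = 61 * 53
    R = 1 << 32                 # hypothetical radix; gcd(R, n) == 1 since n is odd
    R_inv = pow(R, -1, n)
    R2 = (R * R) % n            # analogue of the *_factor constants (2**(2*r) mod n)

    def mont_mul(a, b):
        # models the effect of worker.multiply(): a * b * 2**(-r) mod n
        return (a * b * R_inv) % n

    a = 1234 % n
    a_mont = mont_mul(a, R2)    # "glue 2**r": a * R mod n
    assert a_mont == (a * R) % n
    back = mont_mul(a_mont, 1)  # "unglue 2**r": multiply by one
    assert back == a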