author    Pavel V. Shatov (Meister) <meisterpaul1@yandex.ru>  2019-03-23 10:58:09 +0300
committer Pavel V. Shatov (Meister) <meisterpaul1@yandex.ru>  2019-03-23 10:58:09 +0300
commit    ecbc1b71ae0553e322e8c24492d78098f1947f4f (patch)
tree      070c909e51a686864d3058a1ea10b0aa69552ed6 /modexpng_fpga_model.py
parent    3ef1813079662305cf62ac68dc6a7729d3961d84 (diff)
Added blinding to the math model.
Diffstat (limited to 'modexpng_fpga_model.py')
-rw-r--r--  modexpng_fpga_model.py  92
1 file changed, 55 insertions, 37 deletions
diff --git a/modexpng_fpga_model.py b/modexpng_fpga_model.py
index 1152bdf..b1628e3 100644
--- a/modexpng_fpga_model.py
+++ b/modexpng_fpga_model.py
@@ -175,6 +175,8 @@ class ModExpNG_TestVector():
self.n_coeff = ModExpNG_Operand(vector_inst.n_coeff, KEY_LENGTH + _WORD_WIDTH)
self.p_coeff = ModExpNG_Operand(vector_inst.p_coeff, _KEY_LENGTH_HALF + _WORD_WIDTH)
self.q_coeff = ModExpNG_Operand(vector_inst.q_coeff, _KEY_LENGTH_HALF + _WORD_WIDTH)
+ self.x = ModExpNG_Operand(vector_inst.x, KEY_LENGTH)
+ self.y = ModExpNG_Operand(vector_inst.y, KEY_LENGTH)
class ModExpNG_PartRecombinator():
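The new x and y fields carry the blinding pair used further down in the main section: the message is multiplied by y before the exponentiations and the CRT result is multiplied by x afterwards. As a rough illustration only, assuming the usual RSA base-blinding relation y = r**e mod n, x = r**-1 mod n (the model itself takes x and y as precomputed values from the test vector), such a pair could be produced like this; make_blinding_pair is a hypothetical helper, not part of the model:

# A minimal sketch, not part of the patch, assuming y = r**e mod n and
# x = r**-1 mod n for a random invertible r.
import math
import secrets

def make_blinding_pair(n, e):
    # pick a random r that is invertible modulo n
    while True:
        r = secrets.randbelow(n)
        if r > 1 and math.gcd(r, n) == 1:
            break
    y = pow(r, e, n)    # message gets multiplied by y before exponentiation
    x = pow(r, -1, n)   # result gets multiplied by x to unblind (Python 3.8+)
    return x, y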
@@ -615,6 +617,13 @@ class ModExpNG_Worker():
return ModExpNG_Operand(None, ab_num_words, R)
+    def reduce(self, a):
+        carry = 0
+        for x in range(len(a.words)):
+            a.words[x] += carry
+            carry = (a.words[x] >> _WORD_WIDTH) & 1
+            a.words[x] &= self.lowlevel._word_mask
+
if __name__ == "__main__":
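The new reduce() walks the word array once and folds each word's carry bit into the next word, turning the redundant representation (one extra carry bit per word, as the "& 1" suggests) into a plain radix-2**_WORD_WIDTH number. A standalone sketch of the same idea, assuming a 16-bit word width (the model's actual _WORD_WIDTH may differ) and a little-endian word list:

# Standalone illustration of the carry propagation done by reduce(),
# assuming 16-bit words that may each hold one extra carry bit.
WORD_WIDTH = 16
WORD_MASK = (1 << WORD_WIDTH) - 1

def propagate_carries(words):
    carry = 0
    for i in range(len(words)):
        words[i] += carry                     # fold in carry from the previous word
        carry = (words[i] >> WORD_WIDTH) & 1  # extract this word's carry bit
        words[i] &= WORD_MASK                 # keep the low WORD_WIDTH bits
    return words

print(propagate_carries([0x1FFFF, 0x0000]))   # -> [0xFFFF, 0x0001] == 0x1FFFF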
@@ -624,67 +633,76 @@ if __name__ == "__main__":
# create worker
worker = ModExpNG_Worker()
- # number of words
- pq_num_words = _KEY_LENGTH_HALF // _WORD_WIDTH
-
- # obtain known good reference values with built-in math
- s_known = pow(vector.m.number(), vector.d.number(), vector.n.number())
- sp_known = pow(vector.m.number(), vector.dp.number(), vector.p.number())
- sq_known = pow(vector.m.number(), vector.dq.number(), vector.q.number())
+ # numbers of words
+ n_num_words = KEY_LENGTH // _WORD_WIDTH
+ pq_num_words = n_num_words // 2
- # first reduce message, this glues 2**-r to the message as a side effect
- mpa = worker.multiply(vector.m, None, vector.p, vector.p_coeff, pq_num_words, reduce_only=True)
- mqa = worker.multiply(vector.m, None, vector.q, vector.q_coeff, pq_num_words, reduce_only=True)
-
- # unglue 2**-r from message by gluing 2**r to it to compensate
- mp = worker.multiply(mpa, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
- mq = worker.multiply(mqa, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+ # obtain known good reference value with built-in math
+ s_known = pow(vector.m.number(), vector.d.number(), vector.n.number())
# one
i = ModExpNG_Operand(1, _KEY_LENGTH_HALF)
# bring one into Montgomery domain (glue 2**r to one)
- ipz = worker.multiply(i, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
- iqz = worker.multiply(i, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+ ip_factor = worker.multiply(i, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ iq_factor = worker.multiply(i, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
- # bring message into Montgomery domain (glue 2**r to message)
- mpz = worker.multiply(mp, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
- mqz = worker.multiply(mq, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+ # bring blinding coefficients into Montgomery domain (glue 2**(2*r) to x and y)
+ x_factor = worker.multiply(vector.x, vector.n_factor, vector.n, vector.n_coeff, n_num_words)
+ y_factor = worker.multiply(vector.y, vector.n_factor, vector.n, vector.n_coeff, n_num_words)
- # do "easier" exponentiations
- spz = worker.exponentiate(ipz, mpz, vector.dp, vector.p, vector.p_factor, vector.p_coeff, pq_num_words)
- sqz = worker.exponentiate(iqz, mqz, vector.dq, vector.q, vector.q_factor, vector.q_coeff, pq_num_words)
+ # blind message
+ m_blind = worker.multiply(vector.m, y_factor, vector.n, vector.n_coeff, n_num_words)
- # return "easier" parts from Montgomery domain (unglue 2**r from result)
- sp = worker.multiply(i, spz, vector.p, vector.p_coeff, pq_num_words)
- sq = worker.multiply(i, sqz, vector.q, vector.q_coeff, pq_num_words)
+ # have to convert to non-redundant representation here
+ worker.reduce(m_blind)
+
+ # first reduce message, this glues 2**-r to the message as a side effect
+ mp_blind_inverse_factor = worker.multiply(m_blind, None, vector.p, vector.p_coeff, pq_num_words, reduce_only=True)
+ mq_blind_inverse_factor = worker.multiply(m_blind, None, vector.q, vector.q_coeff, pq_num_words, reduce_only=True)
- # check "easier" results
- if sp.number() == sp_known: print("sp is OK")
- else: print("sp is WRONG!")
+ # unglue 2**-r from message by gluing 2**r to it to compensate
+ mp_blind = worker.multiply(mp_blind_inverse_factor, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ mq_blind = worker.multiply(mq_blind_inverse_factor, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
- if sq.number() == sq_known: print("sq is OK")
- else: print("sq is WRONG!")
+ # bring message into Montgomery domain (glue 2**r to message)
+ mp_blind_factor = worker.multiply(mp_blind, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ mq_blind_factor = worker.multiply(mq_blind, vector.q_factor, vector.q, vector.q_coeff, pq_num_words)
+ # do "easier" exponentiations
+ sp_blind_factor = worker.exponentiate(ip_factor, mp_blind_factor, vector.dp, vector.p, vector.p_factor, vector.p_coeff, pq_num_words)
+ sq_blind_factor = worker.exponentiate(iq_factor, mq_blind_factor, vector.dq, vector.q, vector.q_factor, vector.q_coeff, pq_num_words)
+ # return "easier" parts from Montgomery domain (unglue 2**r from result)
+ sp_blind = worker.multiply(i, sp_blind_factor, vector.p, vector.p_coeff, pq_num_words)
+ sq_blind = worker.multiply(i, sq_blind_factor, vector.q, vector.q_coeff, pq_num_words)
+
+ #
# do the "Garner's formula" part
+ #
# 1. r = sp - sq mod p
- sr = worker.subtract(sp, sq, vector.p, pq_num_words)
+ sr_blind = worker.subtract(sp_blind, sq_blind, vector.p, pq_num_words)
# 2. sr_qinv = sr * qinv mod p
- sr_qinv_a = worker.multiply(sr, vector.qinv, vector.p, vector.p_coeff, pq_num_words)
- sr_qinv = worker.multiply(sr_qinv_a, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
+ sr_qinv_blind_inverse_factor = worker.multiply(sr_blind, vector.qinv, vector.p, vector.p_coeff, pq_num_words)
+ sr_qinv_blind = worker.multiply(sr_qinv_blind_inverse_factor, vector.p_factor, vector.p, vector.p_coeff, pq_num_words)
# 3. q_sr_qinv = q * sr_qinv
- q_sr_qinv = worker.multiply(vector.q, sr_qinv, None, None, pq_num_words, multiply_only=True)
+ q_sr_qinv_blind = worker.multiply(vector.q, sr_qinv_blind, None, None, pq_num_words, multiply_only=True)
# 4. s_crt = sq + q_sr_qinv
- s_crt = worker.add(sq, q_sr_qinv, pq_num_words)
+ s_crt_blinded = worker.add(sq_blind, q_sr_qinv_blind, pq_num_words)
+
+ # unblind s
+ s_crt_unblinded = worker.multiply(s_crt_blinded, x_factor, vector.n, vector.n_coeff, n_num_words)
# check
- if s_crt.number() != s_known:
- print("ERROR: s_crt != s_known!")
+ if s_crt_unblinded.number() != s_known:
+ print("ERROR: s_crt_unblinded != s_known!")
else:
print("s is OK")
+#
+# End-of-File
+#
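
For reference, the blinded CRT flow that the main section now models can be written with plain Python integers. The sketch below assumes the same base-blinding relation as above (y = r**e mod n, x = r**-1 mod n), works in the ordinary domain rather than the Montgomery domain (the 2**r factors the worker glues and unglues cancel out), and uses parameter names that mirror the test-vector fields but are ordinary ints here; blinded_crt_sign is a hypothetical name, not part of the model:

# Plain-integer reference for the blinded CRT flow above (a sketch, assuming
# y = r**e mod n and x = r**-1 mod n).
def blinded_crt_sign(m, n, p, q, dp, dq, qinv, x, y):
    m_blind = (m * y) % n              # blind the message
    sp = pow(m_blind % p, dp, p)       # "easier" exponentiation mod p
    sq = pow(m_blind % q, dq, q)       # "easier" exponentiation mod q
    sr = (sp - sq) % p                 # 1. r = sp - sq mod p
    sr_qinv = (sr * qinv) % p          # 2. sr_qinv = sr * qinv mod p
    s_blind = sq + q * sr_qinv         # 3./4. s_crt = sq + q * sr_qinv
    return (s_blind * x) % n           # unblind the result

# A sanity check against the built-in math would then be:
#   assert blinded_crt_sign(m, n, p, q, dp, dq, qinv, x, y) == pow(m, d, n)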