# -*- coding: utf-8 -*-
"""
Estimate cost of solving LWE using Gröbner bases.
See :ref:`Arora-GB` for an overview.
"""
from sage.all import (
binomial,
ceil,
exp,
floor,
log,
oo,
pi,
PowerSeriesRing,
prod,
QQ,
RR,
RealField,
sqrt,
)
from .cost import Cost
from .lwe_parameters import LWEParameters
from .io import Logging
def gb_cost(n, D, omega=2, prec=None):
"""
Estimate the complexity of computing a Gröbner basis.

:param n: Number of variables `n > 0`.
:param D: Iterable of `(d, m)` pairs, where `m` is the number of polynomials of degree `d`.
:param omega: Linear algebra exponent, i.e. matrix multiplication costs `O(n^ω)` operations.
:param prec: Compute power series up to this precision (default: `2n`).

EXAMPLE::

>>> from estimator.gb import gb_cost
>>> gb_cost(128, [(2, 256)])
rop: ≈2^144.6, dreg: 17, mem: ≈2^144.6
"""
prec = 2 * n if prec is None else prec
R = PowerSeriesRing(QQ, "z", prec)
z = R.gen()
z = z.add_bigoh(prec)
s = R(1)
s = s.add_bigoh(prec)
s = prod(((1 - z**d)**m for d, m in D), s) / (1 - z) ** n
retval = Cost(rop=oo, dreg=oo)
retval.register_impermanent({"rop": True, "dreg": False, "mem": False})
for dreg in range(prec):
if s[dreg] < 0:
retval["dreg"] = dreg
retval["rop"] = binomial(n + dreg, dreg) ** omega
retval["mem"] = binomial(n + dreg, dreg) ** 2
break
return retval
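
# A minimal pure-Python sketch (illustrative only, not used by the estimator; the helper
# name is hypothetical): truncated power series as plain coefficient lists, with the degree
# of regularity read off as the index of the first negative coefficient of
# prod_i (1 - z^d_i)^m_i / (1 - z)^n, i.e. the same quantity ``gb_cost`` computes with Sage.
def _dreg_sketch(n, D, prec):
    def mul(a, b):
        # truncated product of two coefficient lists
        c = [0] * prec
        for i, ai in enumerate(a):
            if ai:
                for j in range(min(len(b), prec - i)):
                    c[i + j] += ai * b[j]
        return c

    s = [1] + [0] * (prec - 1)
    for d, m in D:
        factor = [0] * prec
        factor[0] = 1
        if d < prec:
            factor[d] = -1
        for _ in range(m):  # multiply by (1 - z^d), m times
            s = mul(s, factor)
    geom = [1] * prec  # 1/(1 - z) = 1 + z + z^2 + ...
    for _ in range(n):  # divide by (1 - z)^n
        s = mul(s, geom)
    return next((i for i, c in enumerate(s) if c < 0), None)


# _dreg_sketch(128, [(2, 256)], 40) should agree with the doctest above and return 17.
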
class AroraGB:
@staticmethod
def ps_single(C):
"""
Probability that a Gaussian is within `C` standard deviations.
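
This is `1 - 2·exp(-C²/2)/(C·sqrt(2π))`, i.e. one minus the standard (Mills-ratio) upper
bound on the two-sided Gaussian tail beyond `C` standard deviations, so the returned value
slightly underestimates the true probability. For example::

    AroraGB.ps_single(3)  # ≈ 0.9970, just below the exact erf(3/sqrt(2)) ≈ 0.9973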
"""
RR = RealField(256)
C = RR(C)
return RR(1 - (RR(2) / (C * RR(sqrt(2 * pi))) * exp(-(C**2) / RR(2))))
@classmethod
def cost_bounded(cls, params, success_probability=0.99, omega=2, log_level=1, **kwds):
"""
Estimate cost using absolute bounds for secrets and noise.

:param params: LWE parameters.
:param success_probability: Target success probability.
:param omega: Linear algebra constant.
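
Each noise value lies in a set of size `d = b - a + 1` for noise bounds `(a, b)`, so the
Arora-Ge polynomial `∏_{i=a}^{b} (c - ⟨a, s⟩ - i)` built from a sample `(a, c)` has degree
`d` in the secret and is guaranteed to vanish; ``gb_cost`` is then invoked on `m` such
equations plus the equations contributed by the secret. A minimal usage sketch, mirroring
what ``AroraGB.__call__`` does for bounded noise::

    from estimator import *
    from estimator.gb import AroraGB
    params = LWE.Parameters(n=64, q=7681, Xs=ND.DiscreteGaussian(3.0), Xe=ND.UniformMod(7), m=2**50)
    AroraGB.cost_bounded(params.normalize())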
"""
d = params.Xe.bounds[1] - params.Xe.bounds[0] + 1
dn = cls.equations_for_secret(params)
m = min(params.m, params.n**d)
cost = gb_cost(params.n, [(d, m)] + dn, omega)
cost["t"] = (d - 1) // 2
if cost["dreg"] < oo and binomial(params.n + cost["dreg"], cost["dreg"]) < params.m:
cost["m"] = binomial(params.n + cost["dreg"], cost["dreg"])
else:
cost["m"] = m
cost.register_impermanent(t=False, m=True)
return cost
@classmethod
def cost_Gaussian_like(cls, params, success_probability=0.99, omega=2, log_level=1, **kwds):
"""
Estimate cost using absolute bounds for secrets and Gaussian tail bounds for noise.

:param params: LWE parameters.
:param success_probability: Target success probability.
:param omega: Linear algebra constant.
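
For a candidate tail cut `t`, the noise is treated as if it were bounded in `[-t, t]`, so
the equations have degree `d = 2t + 1` and a single sample obeys the cut with probability
`p =` ``ps_single(t/stddev)``; requiring `p^m ≥` ``success_probability`` caps the number of
usable samples at `m = ⌊log_p(success_probability)⌋`, the ``m_can`` computed below.
Illustrative sketch of that bookkeeping (reusing this module's Sage imports)::

    p = AroraGB.ps_single(3.0)       # tail cut at t = 3 for noise with stddev 1
    m_can = floor(log(0.99, p))      # samples we may consume at this cut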
"""
dn = cls.equations_for_secret(params)
best, stuck = None, 0
def t_and_m_can(t):
C = RR(t / params.Xe.stddev)
assert C >= 1  # t is swept from ceil(stddev) upwards, so C >= 1; smaller C would not be useful
# Pr[success]^m = Pr[overall success]
single_prob = AroraGB.ps_single(C)
if single_prob == 1:
m_can = 2**31 # some arbitrary max
else:
# log(success_probability, single_prob)
# == log(success_probability, 2) / log(single_prob, 2)
m_can = floor(log(success_probability, single_prob))
return t, m_can
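# Sweep the tail cut t upwards, starting at roughly one standard deviation of the noise;
# the loop below stops once the required number of samples exceeds what is available, or
# once the cost has stopped improving for several consecutive values of t.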
for t, m_can in map(t_and_m_can, range(ceil(params.Xe.stddev), params.n)):
if m_can > params.m:
break
d = 2 * t + 1
current = gb_cost(params.n, [(d, m_can)] + dn, omega)
if current["dreg"] == oo:
continue
current["t"] = t
current["m"] = m_can
current.register_impermanent(t=False, m=True)
current = current.reorder("rop", "m", "dreg", "t")
Logging.log("repeat", log_level + 1, repr(current))
if best is None:
best = current
elif best > current:
best = current
stuck = 0
else:
stuck += 1
if stuck >= 5:
break
return best if best is not None else Cost(rop=oo, dreg=oo)
@classmethod
def equations_for_secret(cls, params):
"""
Return a list of ``(d, n)`` pairs encoding that `n` equations of degree `d` are available from the LWE secret
(empty if the secret distribution is wider than the noise).

:param params: LWE parameters.
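
If the secret distribution is at most as wide as the noise, every secret coordinate `s_i`
satisfies the degree-`d` polynomial `∏_{v} (s_i - v) = 0` over its support of size `d`
(truncated at `3σ` for Gaussian-like secrets), yielding `n` extra equations. Illustrative
sketch::

    from estimator import *
    from estimator.gb import AroraGB
    AroraGB.equations_for_secret(LWE.Parameters(n=64, q=7681, Xs=ND.UniformMod(3), Xe=ND.DiscreteGaussian(3.0)))
    # expected: [(3, 64)], since a ternary coordinate satisfies s·(s - 1)·(s + 1) = 0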
"""
if params.Xs > params.Xe:
return []
a, b = params.Xs.bounds
if b - a < oo:
d = b - a + 1
elif params.Xs.is_Gaussian_like:
d = 2 * ceil(3 * params.Xs.stddev) + 1
else:
raise NotImplementedError(f"Do not know how to handle {params.Xs}.")
return [(d, params.n)]
def __call__(
self, params: LWEParameters, success_probability=0.99, omega=2, log_level=1, **kwds
):
"""
Arora-GB as described in [ICALP:AroGe11]_, [EPRINT:ACFP14]_.

:param params: LWE parameters.
:param success_probability: Targeted success probability `< 1`.
:param omega: Linear algebra constant.
:return: A cost dictionary.

The returned cost dictionary has the following entries:

- ``rop``: Total number of word operations (≈ CPU cycles).
- ``m``: Number of samples consumed.
- ``dreg``: The degree of regularity or "solving degree".
- ``t``: Polynomials of degree `2t + 1` are considered.
- ``mem``: Total memory usage.
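
Individual entries of the returned ``Cost`` object can be read off directly, e.g.
(illustrative, for some ``params``)::

    cost = LWE.arora_gb(params)
    cost["rop"], cost["dreg"]
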
EXAMPLE::
>>> from estimator import *
>>> params = LWE.Parameters(n=64, q=7681, Xs=ND.DiscreteGaussian(3.0), Xe=ND.DiscreteGaussian(3.0), m=2**50)
>>> LWE.arora_gb(params)
rop: ≈2^307.1, m: ≈2^46.8, dreg: 99, t: 25, mem: ≈2^307.1, tag: arora-gb
TESTS::
>>> LWE.arora_gb(params.updated(m=2**120))
rop: ≈2^282.6, m: ≈2^101.1, dreg: 83, t: 36, mem: ≈2^282.6, tag: arora-gb
>>> LWE.arora_gb(params.updated(Xe=ND.UniformMod(7)))
rop: ≈2^60.6, dreg: 7, mem: ≈2^60.6, t: 3, m: ≈2^30.3, tag: arora-gb
>>> LWE.arora_gb(params.updated(Xe=ND.CenteredBinomial(8)))
rop: ≈2^122.3, dreg: 19, mem: ≈2^122.3, t: 8, m: ≈2^50.0, tag: arora-gb
>>> LWE.arora_gb(params.updated(Xs=ND.UniformMod(5), Xe=ND.CenteredBinomial(4), m=1024))
rop: ≈2^227.2, dreg: 54, mem: ≈2^227.2, t: 4, m: 1024, tag: arora-gb
>>> LWE.arora_gb(params.updated(Xs=ND.UniformMod(3), Xe=ND.CenteredBinomial(4), m=1024))
rop: ≈2^189.9, dreg: 39, mem: ≈2^189.9, t: 4, m: 1024, tag: arora-gb
>>> LWE.arora_gb(LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseBinary(64), Xe=ND.DiscreteGaussian(2**10)))
rop: ≈2^inf, dreg: ≈2^inf, tag: arora-gb

.. [EPRINT:ACFP14] Martin R. Albrecht, Carlos Cid, Jean-Charles Faugère & Ludovic Perret. (2014).
   Algebraic algorithms for LWE. https://eprint.iacr.org/2014/1018

.. [ICALP:AroGe11] Sanjeev Arora & Rong Ge. (2011). New algorithms for learning in presence of
   errors. In L. Aceto, M. Henzinger, & J. Sgall, ICALP 2011, Part I (pp. 403–415).
   Springer, Heidelberg.
"""
params = params.normalize()
best = Cost(rop=oo, dreg=oo)
best.register_impermanent({"rop": True, "dreg": False, "mem": False})
if params.Xe.is_bounded:
cost = self.cost_bounded(
params,
success_probability=success_probability,
omega=omega,
log_level=log_level,
)
Logging.log("gb", log_level, f"b: {cost!r}")
best = min(best, cost, key=lambda x: x["dreg"])
if params.Xe.is_Gaussian_like:
cost = self.cost_Gaussian_like(
params,
success_probability=success_probability,
omega=omega,
log_level=log_level,
)
Logging.log("gb", log_level, f"G: {cost!r}")
best = min(best, cost, key=lambda x: x["dreg"])
best["tag"] = "arora-gb"
best["problem"] = params
return best
__name__ = "arora_gb"
arora_gb = AroraGB()