@@ -54,36 +54,20 @@ const (
type obfs4Padding struct {
	conn *commonConn

-	burstDist *discretedist.DiscreteDist
-	shortDist *discretedist.DiscreteDist
-	delayDist *discretedist.DiscreteDist
+	burstDist  *discretedist.DiscreteDist
+	packetDist *discretedist.DiscreteDist
+	delayDist  *discretedist.DiscreteDist

	method PaddingMethod

	recvBuf bytes.Buffer
}

-func (p *obfs4Padding) shortWrite(b []byte) (n int, err error) {
-	if p.method != PaddingObfs4PacketIAT {
-		// Special case len(p) being "short".
-		//
-		// This is kind of annoying to obfuscate, since sending 2 segments
-		// isn't that different from sending 1 segment, and I assume the
-		// forces of evil know how to count.
-		//
-		// So, attempt to be somewhat clever by disabling Nagle and sending
-		// short records sized to something from the distribution.
-		//
-		// These concerns naturally do not apply to the packetization
-		// based obfuscation method.
-		p.conn.setNagle(false)
-		defer p.conn.setNagle(true)
-	}
-
+func (p *obfs4Padding) packetWrite(b []byte) (n int, err error) {
	for remaining := len(b); remaining > 0; {
-		// Sample from the "short" distribution, which omits values less than
+		// Sample from the packet distribution, which omits values less than
		// the tentp framing+payload overhead.
-		targetLen := p.shortDist.Sample(p.conn.mRNG)
+		targetLen := p.packetDist.Sample(p.conn.mRNG)
		wrLen := targetLen - (tentp.FramingOverhead + tentp.PayloadOverhead)
		padLen := 0
		if remaining < wrLen {
@@ -105,12 +89,12 @@ func (p *obfs4Padding) shortWrite(b []byte) (n int, err error) {
	return
}
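
For review context, here is a minimal runnable sketch of the sizing rule packetWrite applies to each record. The overhead values are assumed stand-ins, and the short-tail branch body falls outside the hunk above, so padding out to the target is an assumption; only the shape of the computation comes from the diff.

package main

import "fmt"

// Assumed stand-in values for tentp.FramingOverhead and tentp.PayloadOverhead;
// the real constants live in the tentp package.
const (
	framingOverhead = 24
	payloadOverhead = 16
)

// recordSize mirrors the per-iteration computation in packetWrite: the
// sampled targetLen is the total on-the-wire record length, so the usable
// payload is targetLen minus the framing+payload overhead.
func recordSize(remaining, targetLen int) (wrLen, padLen int) {
	wrLen = targetLen - (framingOverhead + payloadOverhead)
	if remaining < wrLen {
		padLen = wrLen - remaining // assumed: pad short tails out to the target
		wrLen = remaining
	}
	return
}

func main() {
	fmt.Println(recordSize(100, 512))  // 100 bytes of data, 372 of padding
	fmt.Println(recordSize(9000, 512)) // a full 472-byte payload, no padding
}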

-func (p *obfs4Padding) largeWrite(b []byte) (n int, err error) {
+func (p *obfs4Padding) burstWrite(b []byte) (n int, err error) {
	// Because the generic io.Copy() code is used, this gets called with up to
-	// 32 kib of data.
+	// p.conn.copyBufferSize of data (32 KiB default).
	//
	// There's an interesting problem in that it *always* will get called with
-	// 32 kib of data when doing bulk trasfers.
+	// the maximum amount of data when doing bulk transfers.
	//
	// If I could get Linux-ish TCP_INFO on all platforms, the obvious
	// solution would be to packetize things in userland and write based on
@@ -121,32 +105,31 @@ func (p *obfs4Padding) largeWrite(b []byte) (n int, err error) {
	//
	// The obfs4 version of this code buffered and sent everything all at
	// once, and I'm not sure if that's great because bulk transfers probably
-	// stood out more (vs packetizing and writing to a connection with Nagel
+	// stood out more (vs packetizing and writing to a connection with Nagle
	// enabled).

	remaining := len(b)
	isLargeWrite := remaining >= p.conn.copyBufferSize

-	tailPadLen := p.burstDist.Sample(p.conn.mRNG)
-	// tailPadLen += c.conn.maxRecordSize * c.conn.mRNG.Intn(3)
+	tailTargetLen := p.burstDist.Sample(p.conn.mRNG)

	// Write out each frame (with payload).
	for remaining > 0 {
		wrLen := p.conn.maxRecordSize
		padLen := 0
		if remaining <= wrLen {
+			wrLen = remaining
+
			// Append the padding to the last frame.
-			if tailPadLen < tentp.FramingOverhead+tentp.PayloadOverhead+wrLen {
+			if tailTargetLen < tentp.FramingOverhead+tentp.PayloadOverhead+wrLen {
				// Need to also pad out to a "full" record.
-				tailPadLen += wrLen - remaining
+				tailTargetLen += p.conn.maxRecordSize - remaining
			} else {
				// The tail of the burst counts towards part of the
				// padding.
-				tailPadLen -= tentp.FramingOverhead + tentp.PayloadOverhead + remaining
+				tailTargetLen -= tentp.FramingOverhead + tentp.PayloadOverhead + remaining
			}
-
-			padLen = tailPadLen
-			wrLen = remaining
+			padLen = tailTargetLen
		}

		if err := p.conn.SendRawRecord(framing.CmdData, b[n:n+wrLen], padLen); err != nil {
@@ -166,10 +149,10 @@ func (p *obfs4Padding) largeWrite(b []byte) (n int, err error) {
}
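
The tail-padding arithmetic is the subtle part of this change, so here is a self-contained sketch to check it against. The branch logic is transcribed from the hunk above; the constants are assumed stand-ins for tentp.FramingOverhead, tentp.PayloadOverhead, and p.conn.maxRecordSize.

package main

import "fmt"

const (
	framingOverhead = 24    // assumed stand-in for tentp.FramingOverhead
	payloadOverhead = 16    // assumed stand-in for tentp.PayloadOverhead
	maxRecordSize   = 16384 // assumed stand-in for p.conn.maxRecordSize
)

// tailPad computes the padding appended to a burst's final record, given
// the payload remaining for that record and the sampled tail target length.
func tailPad(remaining, tailTargetLen int) (padLen int) {
	if tailTargetLen < framingOverhead+payloadOverhead+remaining {
		// The target is smaller than the tail record itself, so pad the
		// record out to a "full" record instead.
		tailTargetLen += maxRecordSize - remaining
	} else {
		// The tail record counts towards the target, so only the
		// difference is sent as padding.
		tailTargetLen -= framingOverhead + payloadOverhead + remaining
	}
	return tailTargetLen
}

func main() {
	fmt.Println(tailPad(1000, 300))  // small target: 300 + (16384 - 1000) = 15684
	fmt.Println(tailPad(1000, 8192)) // large target: 8192 - (40 + 1000) = 7152
}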

func (p *obfs4Padding) Write(b []byte) (n int, err error) {
-	if len(b) < p.conn.maxRecordSize || p.method == PaddingObfs4PacketIAT {
-		n, err = p.shortWrite(b)
+	if p.method == PaddingObfs4PacketIAT {
+		n, err = p.packetWrite(b)
	} else {
-		n, err = p.largeWrite(b)
+		n, err = p.burstWrite(b)
	}
	return
}
@@ -197,11 +180,8 @@ func newObfs4Padding(conn *commonConn, m PaddingMethod, seed []byte) (paddingImp
	// XXX: Cache the distributions? (Should these be biased?)
	r := rand.NewDRBG(seed)
	p.burstDist = discretedist.NewUniform(r, 1, p.conn.maxRecordSize, 100, false)
-	p.shortDist = discretedist.NewUniform(r, tentp.FramingOverhead+tentp.PayloadOverhead, p.conn.maxRecordSize, 100, false)
-
-	// IAT delay dist between 0 to 25 ms.
-	// Note: This is always needed due to the short write obfsucation strategy.
-	p.delayDist = discretedist.NewUniform(r, 0, 5*1000, 100, false)
+	p.packetDist = discretedist.NewUniform(r, tentp.FramingOverhead+tentp.PayloadOverhead, p.conn.maxRecordSize, 100, false)
+	p.delayDist = discretedist.NewUniform(r, 0, 5*1000, 100, false) // 0 to 5 ms.

	// Add random [0, 2 * tau) read delay to mask timings on data
	// fed to the upstream as well.
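
One unit note: the 5*1000 range is in microseconds, matching the new "0 to 5 ms" comment. Below is a hedged sketch of how such a sampled value would be applied as an inter-arrival delay; the stand-in sampler replaces p.delayDist.Sample(p.conn.mRNG), which this diff constructs but does not use in the shown hunks, so the usage is an assumption.

package main

import (
	"math/rand"
	"time"
)

// sampleDelayMicros is a stand-in for p.delayDist.Sample(p.conn.mRNG):
// a value drawn from [0, 5000] microseconds, i.e. 0 to 5 ms.
func sampleDelayMicros(rng *rand.Rand) int {
	return rng.Intn(5*1000 + 1)
}

func main() {
	// Fixed seed for this example only; the real code builds its
	// distributions from a DRBG seeded via rand.NewDRBG(seed).
	rng := rand.New(rand.NewSource(1))

	// Assumed usage: sleep the sampled delay between successive record
	// writes to perturb inter-arrival timings.
	time.Sleep(time.Duration(sampleDelayMicros(rng)) * time.Microsecond)
}
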
@@ -214,7 +194,7 @@ func newObfs4Padding(conn *commonConn, m PaddingMethod, seed []byte) (paddingImp
		p.conn.enforceRecordSize = true
	} else {
		// There's a fundamental mismatch between what our idea of a packet
-		//should be and what should be sent over the wire due to
+		// should be and what should be sent over the wire due to
		// unavailable/inaccurate PMTU information, and variable length TCP
		// headers (SACK options).
		//