Browse Source

Rename `c`->`p` for the padding impls for increased readability.

Yawning Angel 3 years ago
parent
commit
8ef698805d
3 changed files with 44 additions and 45 deletions
  1. 2 2
      padding_impl.go
  2. 11 11
      padding_null.go
  3. 31 32
      padding_obfs4.go

+ 2 - 2
padding_impl.go

@@ -28,7 +28,7 @@ type paddingImpl interface {
 	OnClose()
 }
 
-func paddingImplGenericRead(conn *commonConn, recvBuf *bytes.Buffer, p []byte) (n int, err error) {
+func paddingImplGenericRead(conn *commonConn, recvBuf *bytes.Buffer, b []byte) (n int, err error) {
 	// This buffering strategy will return short reads, since a new record
 	// is only consumed off the network once the entirety of the previous
 	// record has been returned.  A goroutine that consumes off the network
@@ -56,7 +56,7 @@ func paddingImplGenericRead(conn *commonConn, recvBuf *bytes.Buffer, p []byte) (
 
 	// Service the Read using buffered payload.
 	if recvBuf.Len() > 0 && err == nil {
-		n, _ = recvBuf.Read(p)
+		n, _ = recvBuf.Read(b)
 	}
 	return
 }

+ 11 - 11
padding_null.go

@@ -38,18 +38,18 @@ type nullPadding struct {
 	recvBuf bytes.Buffer
 }
 
-func (c *nullPadding) Write(p []byte) (int, error) {
+func (p *nullPadding) Write(b []byte) (int, error) {
 	// Break the write up into records, and send them out on the wire.  The
 	// kernel is better at breaking writes into appropriate sized packets than
 	// any userland app will be (at least with TCP), so use the maximum record
 	// size permitted by the framing layer as padding isn't a concern.
-	for off, left := 0, len(p); left > 0; {
+	for off, left := 0, len(b); left > 0; {
 		wrSize := tentp.MaxPlaintextRecordSize
 		if left < wrSize {
 			wrSize = left
 		}
 
-		if err := c.conn.SendRawRecord(framing.CmdData, p[off:off+wrSize], 0); err != nil {
+		if err := p.conn.SendRawRecord(framing.CmdData, b[off:off+wrSize], 0); err != nil {
 			return 0, err
 		}
 
@@ -57,25 +57,25 @@ func (c *nullPadding) Write(p []byte) (int, error) {
 		left -= wrSize
 	}
 
-	return len(p), nil
+	return len(b), nil
 }
 
-func (c *nullPadding) Read(p []byte) (n int, err error) {
-	return paddingImplGenericRead(c.conn, &c.recvBuf, p)
+func (p *nullPadding) Read(b []byte) (n int, err error) {
+	return paddingImplGenericRead(p.conn, &p.recvBuf, b)
 }
 
-func (c *nullPadding) OnClose() {
-	c.recvBuf.Reset()
+func (p *nullPadding) OnClose() {
+	p.recvBuf.Reset()
 }
 
 func newNullPadding(conn *commonConn) paddingImpl {
-	c := new(nullPadding)
-	c.conn = conn
+	p := new(nullPadding)
+	p.conn = conn
 
 	// The net package default behavior is to disable Nagle's algorithm,
 	// but it's more efficient to enable it, since the kernel will handle
 	// framing better than we can, especially for this use case.
 	conn.setNagle(true)
 
-	return c
+	return p
 }

+ 31 - 32
padding_obfs4.go

@@ -56,7 +56,7 @@ type obfs4Padding struct {
 	recvBuf bytes.Buffer
 }
 
-func (c *obfs4Padding) shortWrite(p []byte) (n int, err error) {
+func (p *obfs4Padding) shortWrite(b []byte) (n int, err error) {
 	// Special case len(b) being "short".
 	//
 	// This is kind of annoying to obfuscate, since sending 2 segments isn't
@@ -65,14 +65,13 @@ func (c *obfs4Padding) shortWrite(p []byte) (n int, err error) {
 	//
 	// So, attempt to be somewhat clever by disabling Nagle and sending short
 	// records sized to something from the distribution.
-	c.conn.setNagle(false)
-	defer c.conn.setNagle(true)
+	p.conn.setNagle(false)
+	defer p.conn.setNagle(true)
 
-	remaining := len(p)
-	for remaining > 0 {
+	for remaining := len(b); remaining > 0; {
 		// Sample from the "short" distribution, which omits values less than
 		// the tentp framing+payload overhead.
-		targetLen := c.shortDist.Sample(c.conn.mRNG)
+		targetLen := p.shortDist.Sample(p.conn.mRNG)
 		wrLen := targetLen - (tentp.FramingOverhead + tentp.PayloadOverhead)
 		padLen := 0
 		if remaining < wrLen {
@@ -80,21 +79,21 @@ func (c *obfs4Padding) shortWrite(p []byte) (n int, err error) {
 			wrLen = remaining
 		}
 
-		if err := c.conn.SendRawRecord(framing.CmdData, p[n:n+wrLen], padLen); err != nil {
+		if err := p.conn.SendRawRecord(framing.CmdData, b[n:n+wrLen], padLen); err != nil {
 			return 0, err
 		}
 		n += wrLen
 		remaining -= wrLen
 
 		// Always inject a delay here, since discrete packets are wanted.
-		delay := time.Duration(c.delayDist.Sample(c.conn.mRNG)) * time.Microsecond
+		delay := time.Duration(p.delayDist.Sample(p.conn.mRNG)) * time.Microsecond
 		time.Sleep(delay)
 	}
 
 	return
 }
 
-func (c *obfs4Padding) largeWrite(p []byte) (n int, err error) {
+func (p *obfs4Padding) largeWrite(b []byte) (n int, err error) {
 	// Because the generic io.Copy() code is used, this gets called with up to
 	// 32 kib of data.
 	//
@@ -113,15 +112,15 @@ func (c *obfs4Padding) largeWrite(p []byte) (n int, err error) {
 	// stood out more (vs packetizing and writing to a connection with Nagle
 	// enabled).
 
-	remaining := len(p)
+	remaining := len(b)
 	isLargeWrite := remaining >= 32*1024 // XXX: What about CopyBuffer?
 
-	tailPadLen := c.burstDist.Sample(c.conn.mRNG)
+	tailPadLen := p.burstDist.Sample(p.conn.mRNG)
 	// tailPadLen += p.conn.maxRecordSize * p.conn.mRNG.Intn(3)
 
 	// Write out each frame (with payload).
 	for remaining > 0 {
-		wrLen := c.conn.maxRecordSize
+		wrLen := p.conn.maxRecordSize
 		padLen := 0
 		if remaining <= wrLen {
 			// Append the padding to the last frame.
@@ -138,7 +137,7 @@ func (c *obfs4Padding) largeWrite(p []byte) (n int, err error) {
 			wrLen = remaining
 		}
 
-		if err := c.conn.SendRawRecord(framing.CmdData, p[n:n+wrLen], padLen); err != nil {
+		if err := p.conn.SendRawRecord(framing.CmdData, b[n:n+wrLen], padLen); err != nil {
 			return 0, err
 		}
 		n += wrLen
@@ -147,34 +146,34 @@ func (c *obfs4Padding) largeWrite(p []byte) (n int, err error) {
 
 	// Add a delay sampled from the IAT distribution if we do not suspect that
 	// further data will be coming shortly.
-	if c.method == PaddingObfs4BurstIAT && !isLargeWrite {
-		delay := time.Duration(c.delayDist.Sample(c.conn.mRNG)) * time.Microsecond
+	if p.method == PaddingObfs4BurstIAT && !isLargeWrite {
+		delay := time.Duration(p.delayDist.Sample(p.conn.mRNG)) * time.Microsecond
 		time.Sleep(delay)
 	}
 	return
 }
 
-func (c *obfs4Padding) Write(p []byte) (n int, err error) {
-	if len(p) > c.conn.maxRecordSize {
-		n, err = c.shortWrite(p)
+func (p *obfs4Padding) Write(b []byte) (n int, err error) {
+	if len(b) > p.conn.maxRecordSize {
+		n, err = p.shortWrite(b)
 	} else {
-		n, err = c.largeWrite(p)
+		n, err = p.largeWrite(b)
 	}
 	return
 }
 
-func (c *obfs4Padding) Read(p []byte) (int, error) {
-	return paddingImplGenericRead(c.conn, &c.recvBuf, p)
+func (p *obfs4Padding) Read(b []byte) (int, error) {
+	return paddingImplGenericRead(p.conn, &p.recvBuf, b)
 }
 
-func (c *obfs4Padding) OnClose() {
-	c.recvBuf.Reset()
+func (p *obfs4Padding) OnClose() {
+	p.recvBuf.Reset()
 }
 
 func newObfs4Padding(conn *commonConn, m PaddingMethod, seed []byte) (paddingImpl, error) {
-	c := new(obfs4Padding)
-	c.conn = conn
-	c.method = m
+	p := new(obfs4Padding)
+	p.conn = conn
+	p.method = m
 
 	if len(seed) != Obfs4SeedLength {
 		return nil, ErrInvalidPadding
@@ -185,16 +184,16 @@ func newObfs4Padding(conn *commonConn, m PaddingMethod, seed []byte) (paddingImp
 	//
 	// XXX: Cache the distributions? (Should these be biased?)
 	r := rand.NewDRBG(seed)
-	c.burstDist = discretedist.NewUniform(r, 1, c.conn.maxRecordSize, 100, false)
-	c.shortDist = discretedist.NewUniform(r, tentp.FramingOverhead+tentp.PayloadOverhead, c.conn.maxRecordSize, 100, false)
+	p.burstDist = discretedist.NewUniform(r, 1, p.conn.maxRecordSize, 100, false)
+	p.shortDist = discretedist.NewUniform(r, tentp.FramingOverhead+tentp.PayloadOverhead, p.conn.maxRecordSize, 100, false)
 
 	// IAT delay dist between 0 to 25 ms.
 	// Note: This is always needed due to the short write obfuscation strategy.
-	c.delayDist = discretedist.NewUniform(r, 0, 5*1000, 100, false)
-	if !c.conn.isClient {
+	p.delayDist = discretedist.NewUniform(r, 0, 5*1000, 100, false)
+	if !p.conn.isClient {
 		// Add random [0, 2 * tau) read delay to mask timings on data
 		// fed to the upstream as well.
-		c.conn.enableReadDelay = true
+		p.conn.enableReadDelay = true
 	}
 
 	// There's a fundamental mismatch between what our idea of a packet should
@@ -205,5 +204,5 @@ func newObfs4Padding(conn *commonConn, m PaddingMethod, seed []byte) (paddingImp
 	// disconnect.
 	conn.setNagle(true)
 
-	return c, nil
+	return p, nil
 }