Compare commits

..

1 Commits

Author SHA1 Message Date
Petteri Aimonen
7a9c29f2d7 Perform field initialization to defaults only when the field is skipped.
Avoids unnecessary initialization & unnecessary scan of
the pb_field_t array.

Runtime on Cortex-M3 -5%, code size +2%.

Could need some more testing with random field orders.
Have to write a tool to randomize fields in a message.
2013-04-05 21:35:36 +03:00
111 changed files with 684 additions and 2969 deletions

.gitignore

@@ -7,7 +7,6 @@
*.pb
*~
*.tar.gz
.sconsign.dblite
julkaisu.txt
docs/*.html
docs/generator_flow.png
@@ -19,3 +18,22 @@ example_avr_double/test_conversions
example_unions/decode
example_unions/encode
generator/nanopb_pb2.pyc
tests/decode_unittests
tests/encode_unittests
tests/test_compiles
tests/test_decode1
tests/test_decode2
tests/test_decode3
tests/test_decode3_buf
tests/test_decode_callbacks
tests/test_encode1
tests/test_encode2
tests/test_encode3
tests/test_encode3_buf
tests/test_encode_callbacks
tests/test_missing_fields
tests/test_multiple_files
tests/bc_decode
tests/bc_encode
tests/breakpoints


@@ -1,38 +1,3 @@
nanopb-0.2.3
Improve compatibility by removing ternary operator from initializations (issue 88)
Fix build error on Visual C++ (issue 84, patch by Markus Schwarzenberg)
Don't stop on unsupported extension fields (issue 83)
Add an example pb_syshdr.h file for non-C99 compilers
Reorganize tests and examples into subfolders (issue 63)
Switch from Makefiles to scons for building the tests
Make the tests buildable on Windows
nanopb-0.2.2
Add support for extension fields (issue 17)
Fix unknown fields in empty message (issue 78)
Include the field tags in the generated .pb.h file.
Add pb_decode_delimited and pb_encode_delimited wrapper functions (issue 74)
Add a section in top of pb.h for changing compilation settings (issue 76)
Documentation improvements (issues 12, 77 and others)
Improved tests
nanopb-0.2.1
NOTE: The default callback function signature has changed.
If you don't want to update your code, define PB_OLD_CALLBACK_STYLE.
Change the callback function to use void** (issue 69)
Add support for defining the nanopb options in a separate file (issue 12)
Add support for packed structs in IAR and MSVC (in addition to GCC) (issue 66)
Implement error message support for the encoder side (issue 7)
Handle unterminated strings when encoding (issue 68)
Fix bug with empty strings in repeated string callbacks (issue 73)
Fix regression in 0.2.0 with optional callback fields (issue 70)
Fix bugs with empty message types (issues 64, 65)
Fix some compiler warnings on clang (issue 67)
Some portability improvements (issues 60, 62)
Various new generator options
Improved tests
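To illustrate the callback signature change noted in the 0.2.1 entry above, here is a minimal sketch of a decode callback in the newer void** style; the function and buffer names are hypothetical, and the pre-0.2.1 style differed only in taking a plain void* as the last parameter::

    #include <stdio.h>
    #include <pb_decode.h>

    /* Hypothetical string-printing callback using the new signature:
     * the last parameter is void** (it was plain void* before 0.2.1). */
    static bool print_string_cb(pb_istream_t *stream, const pb_field_t *field, void **arg)
    {
        uint8_t buffer[64] = {0};

        (void)field;
        (void)arg;

        /* Leave room for the terminating zero byte. */
        if (stream->bytes_left > sizeof(buffer) - 1)
            return false;

        if (!pb_read(stream, buffer, stream->bytes_left))
            return false;

        printf("%s\n", buffer);
        return true;
    }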
nanopb-0.2.0
NOTE: This release requires you to regenerate all .pb.c
files. Files generated by older versions will not


@@ -1,94 +0,0 @@
/* This is an example of a header file for platforms/compilers that do
* not come with stdint.h/stddef.h/stdbool.h/string.h. To use it, define
* PB_SYSTEM_HEADER as "pb_syshdr.h", including the quotes, and add the
* compat folder to your include path.
*
* It is very likely that you will need to customize this file to suit
* your platform. For any compiler that supports C99, this file should
* not be necessary.
*/
#ifndef _PB_SYSHDR_H_
#define _PB_SYSHDR_H_
/* stdint.h subset */
#ifdef HAVE_STDINT_H
#include <stdint.h>
#else
/* You will need to modify these to match the word size of your platform. */
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed long long int64_t;
typedef unsigned long long uint64_t;
#endif
/* stddef.h subset */
#ifdef HAVE_STDDEF_H
#include <stddef.h>
#else
typedef uint32_t size_t;
#define offsetof(st, m) ((size_t)(&((st *)0)->m))
#ifndef NULL
#define NULL 0
#endif
#endif
/* stdbool.h subset */
#ifdef HAVE_STDBOOL_H
#include <stdbool.h>
#else
#ifndef __cplusplus
typedef int bool;
#define false 0
#define true 1
#endif
#endif
/* string.h subset */
#ifdef HAVE_STRING_H
#include <string.h>
#else
/* Implementations are from the Public Domain C Library (PDCLib). */
static size_t strlen( const char * s )
{
size_t rc = 0;
while ( s[rc] )
{
++rc;
}
return rc;
}
static void * memcpy( void *s1, const void *s2, size_t n )
{
char * dest = (char *) s1;
const char * src = (const char *) s2;
while ( n-- )
{
*dest++ = *src++;
}
return s1;
}
static void * memset( void * s, int c, size_t n )
{
unsigned char * p = (unsigned char *) s;
while ( n-- )
{
*p++ = (unsigned char) c;
}
return s;
}
#endif
#endif


@@ -10,40 +10,47 @@ The things outlined here are the underlying concepts of the nanopb design.
Proto files
===========
-All Protocol Buffers implementations use .proto files to describe the message
-format. The point of these files is to be a portable interface description
-language.
+All Protocol Buffers implementations use .proto files to describe the message format.
+The point of these files is to be a portable interface description language.
Compiling .proto files for nanopb
---------------------------------
-Nanopb uses the Google's protoc compiler to parse the .proto file, and then a
-python script to generate the C header and source code from it::
+Nanopb uses the Google's protoc compiler to parse the .proto file, and then a python script to generate the C header and source code from it::
user@host:~$ protoc -omessage.pb message.proto
user@host:~$ python ../generator/nanopb_generator.py message.pb
Writing to message.h and message.c
user@host:~$
-Modifying generator behaviour
------------------------------
-Using generator options, you can set maximum sizes for fields in order to
-allocate them statically. The preferred way to do this is to create an .options
-file with the same name as your .proto file::
-# Foo.proto
-message Foo {
-required string name = 1;
-}
-::
-# Foo.options
-Foo.name max_size:16
-For more information on this, see the `Proto file options`_ section in the
-reference manual.
-.. _`Proto file options`: reference.html#proto-file-options
+Compiling .proto files with nanopb options
+------------------------------------------
+Nanopb defines two extensions for message fields described in .proto files: *max_size* and *max_count*.
+These are the maximum size of a string and maximum count of items in an array::
+required string name = 1 [(nanopb).max_size = 40];
+repeated PhoneNumber phone = 4 [(nanopb).max_count = 5];
+To use these extensions, you need to place an import statement in the beginning of the file::
+import "nanopb.proto";
+This file, in turn, requires the file *google/protobuf/descriptor.proto*. This is usually installed under */usr/include*. Therefore, to compile a .proto file which uses options, use a protoc command similar to::
+protoc -I/usr/include -Inanopb/generator -I. -omessage.pb message.proto
+The options can be defined in file, message and field scopes::
+option (nanopb_fileopt).max_size = 20; // File scope
+message Message
+{
+option (nanopb_msgopt).max_size = 30; // Message scope
+required string fieldsize = 1 [(nanopb).max_size = 40]; // Field scope
+}
+It is also possible to give the options on command line, but then they will affect the whole file. For example::
+user@host:~$ python ../generator/nanopb_generator.py -s 'max_size: 20' message.pb
Streams
=======
@@ -256,50 +263,6 @@ generates this field description array for the structure *Person_PhoneNumber*::
};
Extension fields
================
Protocol Buffers supports a concept of `extension fields`_, which are
additional fields to a message, but defined outside the actual message.
The definition can even be in a completely separate .proto file.
The base message is declared as extensible by keyword *extensions* in
the .proto file::
message MyMessage {
.. fields ..
extensions 100 to 199;
}
For each extensible message, *nanopb_generator.py* declares an additional
callback field called *extensions*. The field and associated datatype
*pb_extension_t* forms a linked list of handlers. When an unknown field is
encountered, the decoder calls each handler in turn until either one of them
handles the field, or the list is exhausted.
The actual extensions are declared using the *extend* keyword in the .proto,
and are in the global namespace::
extend MyMessage {
optional int32 myextension = 100;
}
For each extension, *nanopb_generator.py* creates a constant of type
*pb_extension_type_t*. To link together the base message and the extension,
you have to:
1. Allocate storage for your field, matching the datatype in the .proto.
For example, for a *int32* field, you need a *int32_t* variable to store
the value.
2. Create a *pb_extension_t* constant, with pointers to your variable and
to the generated *pb_extension_type_t*.
3. Set the *message.extensions* pointer to point to the *pb_extension_t*.
An example of this is available in *tests/test_encode_extensions.c* and
*tests/test_decode_extensions.c*.
.. _`extension fields`: https://developers.google.com/protocol-buffers/docs/proto#extensions
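The section above spells out the three linking steps in prose; the following is a minimal C sketch of those steps, assuming a hypothetical extensible message *MyMessage* with a generated extension constant named *myextension* (the exact names come out of *nanopb_generator.py*)::

    #include <pb_decode.h>
    #include "mymessage.pb.h" /* hypothetical generated header for MyMessage */

    bool decode_with_extension(pb_istream_t *stream)
    {
        int32_t myextension_value = 0;  /* 1. storage matching the int32 extension */
        pb_extension_t ext;             /* 2. ties the type descriptor to the storage */
        MyMessage msg = {0};

        ext.type = &myextension;        /* generated pb_extension_type_t constant */
        ext.dest = &myextension_value;  /* where the decoded value ends up */
        ext.next = NULL;                /* no further handlers in the linked list */

        msg.extensions = &ext;          /* 3. hook the handler list into the message */

        return pb_decode(stream, MyMessage_fields, &msg);
    }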
Return values and error handling
================================


@@ -41,7 +41,7 @@ Features and limitations
#) Allows specifying maximum size for strings and arrays, so that they can be allocated statically.
#) No malloc needed: everything can be allocated statically or on the stack.
#) You can use either encoder or decoder alone to cut the code size in half.
-#) Support for most protobuf features, including: all data types, nested submessages, default values, repeated and optional fields, packed arrays, extension fields.
+#) Support for most protobuf features, including: all data types, nested submessages, default values, repeated and optional fields, packed arrays.
#) Callback mechanism for handling messages larger than can fit in available RAM.
#) Extensive set of tests.
@@ -90,37 +90,20 @@ After that, buffer will contain the encoded message.
The number of bytes in the message is stored in *stream.bytes_written*.
You can feed the message to *protoc --decode=Example message.proto* to verify its validity.
-For a complete example of the simple case, see *example/simple.c*.
-For a more complex example with network interface, see the *example/network_server* subdirectory.
+For complete examples of the simple cases, see *tests/test_decode1.c* and *tests/test_encode1.c*. For an example with network interface, see the *example* subdirectory.
Compiler requirements
=====================
-Nanopb should compile with most ansi-C compatible compilers. It however
-requires a few header files to be available:
+Nanopb should compile with most ansi-C compatible compilers. It however requires a few header files to be available:
#) *string.h*, with these functions: *strlen*, *memcpy*, *memset*
#) *stdint.h*, for definitions of *int32_t* etc.
#) *stddef.h*, for definition of *size_t*
#) *stdbool.h*, for definition of *bool*
-If these header files do not come with your compiler, you can use the
-file *compat/pb_syshdr.h* instead. It contains an example of how to provide
-the dependencies. You may have to edit it a bit to suit your custom platform.
-To use the pb_syshdr.h, define *PB_SYSTEM_HEADER* to be the name of your custom
-header file. It should provide all the dependencies listed above.
-Running the test cases
-======================
-Extensive unittests and test cases are included under the *tests* folder.
-To build the tests, you will need the `scons`__ build system. The tests should
-be runnable on most platforms. Windows and Linux builds are regularly tested.
-__ http://www.scons.org/
-In addition to the build system, you will also need a working Google Protocol
-Buffers *protoc* compiler, and the Python bindings for Protocol Buffers. On
-Debian-based systems, install the following packages: *protobuf-compiler*,
-*python-protobuf* and *libprotobuf-dev*.
+If these header files do not come with your compiler, you should be able to find suitable replacements online. Mostly the requirements are very simple, just a few basic functions and typedefs.
+Debugging and testing
+=====================
+Extensive unittests are included under the *tests* folder. Just type *make* there to run the tests.


@@ -6,187 +6,31 @@ Nanopb: API reference
.. contents ::
Compilation options
===================
-The following options can be specified in one of two ways:
-1. Using the -D switch on the C compiler command line.
-2. By #defining them at the top of pb.h.
-You must have the same settings for the nanopb library and all code that
-includes pb.h.
-============================ ================================================
-__BIG_ENDIAN__ Set this if your platform stores integers and
-floats in big-endian format. Mixed-endian
-systems (different layout for ints and floats)
-are currently not supported.
-NANOPB_INTERNALS Set this to expose the field encoder functions
-that are hidden since nanopb-0.1.3.
-PB_MAX_REQUIRED_FIELDS Maximum number of required fields to check for
-presence. Default value is 64. Increases stack
-usage 1 byte per every 8 fields. Compiler
-warning will tell if you need this.
-PB_FIELD_16BIT Add support for tag numbers > 255 and fields
-larger than 255 bytes or 255 array entries.
-Increases code size 3 bytes per each field.
-Compiler error will tell if you need this.
-PB_FIELD_32BIT Add support for tag numbers > 65535 and fields
-larger than 65535 bytes or 65535 array entries.
-Increases code size 9 bytes per each field.
-Compiler error will tell if you need this.
-PB_NO_ERRMSG Disables the support for error messages; only
-error information is the true/false return
-value. Decreases the code size by a few hundred
-bytes.
-PB_BUFFER_ONLY Disables the support for custom streams. Only
-supports encoding and decoding with memory
-buffers. Speeds up execution and decreases code
-size slightly.
-PB_OLD_CALLBACK_STYLE Use the old function signature (void\* instead
-of void\*\*) for callback fields. This was the
+The following options can be specified using -D switch given to the C compiler:
+============================ ================================================================================================
+__BIG_ENDIAN__ Set this if your platform stores integers and floats in big-endian format.
+Mixed-endian systems (different layout for ints and floats) are currently not supported.
+NANOPB_INTERNALS Set this to expose the field encoder functions that are hidden since nanopb-0.1.3.
+PB_MAX_REQUIRED_FIELDS Maximum number of required fields to check for presence. Default value is 64. Increases stack
+usage 1 byte per every 8 fields. Compiler warning will tell if you need this.
+PB_FIELD_16BIT Add support for tag numbers > 255 and fields larger than 255 bytes or 255 array entries.
+Increases code size 3 bytes per each field. Compiler error will tell if you need this.
+PB_FIELD_32BIT Add support for tag numbers > 65535 and fields larger than 65535 bytes or 65535 array entries.
+Increases code size 9 bytes per each field. Compiler error will tell if you need this.
+PB_NO_ERRMSG Disables the support for error messages; only error information is the true/false return value.
+Decreases the code size by a few hundred bytes.
+PB_BUFFER_ONLY Disables the support for custom streams. Only supports encoding to memory buffers.
+Speeds up execution and decreases code size slightly.
+PB_OLD_CALLBACK_STYLE Use the old function signature (void\* instead of void\*\*) for callback fields. This was the
default until nanopb-0.2.1.
-PB_SYSTEM_HEADER Replace the standard header files with a single
-header file. It should define all the required
-functions and typedefs listed on the
-`overview page`_. Value must include quotes,
-for example *#define PB_SYSTEM_HEADER "foo.h"*.
-============================ ================================================
+============================ ================================================================================================
The PB_MAX_REQUIRED_FIELDS, PB_FIELD_16BIT and PB_FIELD_32BIT settings allow
raising some datatype limits to suit larger messages. Their need is recognized
automatically by C-preprocessor #if-directives in the generated .pb.h files.
The default setting is to use the smallest datatypes (least resources used).
.. _`overview page`: index.html#compiler-requirements
Proto file options
==================
The generator behaviour can be adjusted using these options, defined in the
'nanopb.proto' file in the generator folder:
============================ ================================================
max_size Allocated size for *bytes* and *string* fields.
max_count Allocated number of entries in arrays
(*repeated* fields).
type Type of the generated field. Default value
is *FT_DEFAULT*, which selects automatically.
You can use *FT_CALLBACK*, *FT_STATIC* or
*FT_IGNORE* to force a callback field, a static
field or to completely ignore the field.
long_names Prefix the enum name to the enum value in
definitions, i.e. *EnumName_EnumValue*. Enabled
by default.
packed_struct Make the generated structures packed.
NOTE: This cannot be used on CPUs that break
on unaligned accesses to variables.
============================ ================================================
These options can be defined for the .proto files before they are converted
using the nanopb-generatory.py. There are three ways to define the options:
1. Using a separate .options file.
This is the preferred way as of nanopb-0.2.1, because it has the best
compatibility with other protobuf libraries.
2. Defining the options on the command line of nanopb_generator.py.
This only makes sense for settings that apply to a whole file.
3. Defining the options in the .proto file using the nanopb extensions.
This is the way used in nanopb-0.1, and will remain supported in the
future. It however sometimes causes trouble when using the .proto file
with other protobuf libraries.
The effect of the options is the same no matter how they are given. The most
common purpose is to define maximum size for string fields in order to
statically allocate them.
Defining the options in a .options file
---------------------------------------
The preferred way to define options is to have a separate file
'myproto.options' in the same directory as the 'myproto.proto'. ::
# myproto.proto
message MyMessage {
required string name = 1;
repeated int32 ids = 4;
}
::
# myproto.options
MyMessage.name max_size:40
MyMessage.ids max_count:5
The generator will automatically search for this file and read the
options from it. The file format is as follows:
* Lines starting with '#' or '//' are regarded as comments.
* Blank lines are ignored.
* All other lines should start with a field name pattern, followed by one or
more options. For example: *"MyMessage.myfield max_size:5 max_count:10"*.
* The field name pattern is matched against a string of form *'Message.field'*.
For nested messages, the string is *'Message.SubMessage.field'*.
* The field name pattern may use the notation recognized by Python fnmatch():
- *\** matches any part of string, like 'Message.\*' for all fields
- *\?* matches any single character
- *[seq]* matches any of characters 's', 'e' and 'q'
- *[!seq]* matches any other character
* The options are written as *'option_name:option_value'* and several options
can be defined on same line, separated by whitespace.
* Options defined later in the file override the ones specified earlier, so
it makes sense to define wildcard options first in the file and more specific
ones later.
If preferred, the name of the options file can be set using the command line
switch *-f* to nanopb_generator.py.
Defining the options on command line
------------------------------------
The nanopb_generator.py has a simple command line option *-s OPTION:VALUE*.
The setting applies to the whole file that is being processed.
Defining the options in the .proto file
---------------------------------------
The .proto file format allows defining custom options for the fields.
The nanopb library comes with *nanopb.proto* which does exactly that, allowing
you do define the options directly in the .proto file::
import "nanopb.proto";
message MyMessage {
required string name = 1 [(nanopb).max_size = 40];
repeated int32 ids = 4 [(nanopb).max_count = 5];
}
A small complication is that you have to set the include path of protoc so that
nanopb.proto can be found. This file, in turn, requires the file
*google/protobuf/descriptor.proto*. This is usually installed under
*/usr/include*. Therefore, to compile a .proto file which uses options, use a
protoc command similar to::
protoc -I/usr/include -Inanopb/generator -I. -omessage.pb message.proto
The options can be defined in file, message and field scopes::
option (nanopb_fileopt).max_size = 20; // File scope
message Message
{
option (nanopb_msgopt).max_size = 30; // Message scope
required string fieldsize = 1 [(nanopb).max_size = 40]; // Field scope
}
The PB_MAX_REQUIRED_FIELDS, PB_FIELD_16BIT and PB_FIELD_32BIT settings allow raising some datatype limits to suit larger messages.
Their need is recognized automatically by C-preprocessor #if-directives in the generated .pb.h files. The default setting is to use
the smallest datatypes (least resources used).
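As a brief illustration of the paragraph above, one plausible way to raise a limit is to pass the macro project-wide with -D, or to define it consistently before the nanopb headers are included; the message name below is hypothetical::

    /* Hypothetical unit that needs tag numbers above 255: either pass
     * -DPB_FIELD_16BIT to the compiler for the whole project, or define
     * it consistently before any nanopb header is included. */
    #define PB_FIELD_16BIT
    #include <pb_encode.h>
    #include "bigmessage.pb.h" /* hypothetical generated header */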
pb.h
====
@@ -304,76 +148,6 @@ Protocol Buffers wire types. These are used with `pb_encode_tag`_. ::
PB_WT_32BIT = 5
} pb_wire_type_t;
pb_extension_type_t
-------------------
Defines the handler functions and auxiliary data for a field that extends
another message. Usually autogenerated by *nanopb_generator.py*::
typedef struct {
bool (*decode)(pb_istream_t *stream, pb_extension_t *extension,
uint32_t tag, pb_wire_type_t wire_type);
bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension);
const void *arg;
} pb_extension_type_t;
In the normal case, the function pointers are *NULL* and the decoder and
encoder use their internal implementations. The internal implementations
assume that *arg* points to a *pb_field_t* that describes the field in question.
To implement custom processing of unknown fields, you can provide pointers
to your own functions. Their functionality is mostly the same as for normal
callback fields, except that they get called for any unknown field when decoding.
pb_extension_t
--------------
Ties together the extension field type and the storage for the field value::
typedef struct {
const pb_extension_type_t *type;
void *dest;
pb_extension_t *next;
} pb_extension_t;
:type: Pointer to the structure that defines the callback functions.
:dest: Pointer to the variable that stores the field value
(as used by the default extension callback functions.)
:next: Pointer to the next extension handler, or *NULL*.
PB_GET_ERROR
------------
Get the current error message from a stream, or a placeholder string if
there is no error message::
#define PB_GET_ERROR(stream) (string expression)
This should be used for printing errors, for example::
if (!pb_decode(...))
{
printf("Decode failed: %s\n", PB_GET_ERROR(stream));
}
The macro only returns pointers to constant strings (in code memory),
so that there is no need to release the returned pointer.
PB_RETURN_ERROR
---------------
Set the error message and return false::
#define PB_RETURN_ERROR(stream,msg) (sets error and returns false)
This should be used to handle error conditions inside nanopb functions
and user callback functions::
if (error_condition)
{
PB_RETURN_ERROR(stream, "something went wrong");
}
The *msg* parameter must be a constant string.
pb_encode.h
===========
@@ -523,17 +297,6 @@ In Protocol Buffers format, the submessage size must be written before the subme
If the submessage contains callback fields, the callback function might misbehave and write out a different amount of data on the second call. This situation is recognized and *false* is returned, but garbage will be written to the output before the problem is detected.
pb_decode.h
===========

example/Makefile

@@ -0,0 +1,14 @@
CFLAGS=-ansi -Wall -Werror -I .. -g -O0
DEPS=../pb_decode.c ../pb_decode.h ../pb_encode.c ../pb_encode.h ../pb.h
all: server client
clean:
rm -f server client fileproto.pb.c fileproto.pb.h
%: %.c $(DEPS) fileproto.pb.h fileproto.pb.c
$(CC) $(CFLAGS) -o $@ $< ../pb_decode.c ../pb_encode.c fileproto.pb.c common.c
fileproto.pb.c fileproto.pb.h: fileproto.proto ../generator/nanopb_generator.py
protoc -I. -I../generator -I/usr/include -ofileproto.pb $<
python ../generator/nanopb_generator.py fileproto.pb

example/fileproto.proto

@@ -0,0 +1,26 @@
import "nanopb.proto";
// This defines protocol for a simple server that lists files.
//
// If you come from high-level programming background, the hardcoded
// maximum lengths may disgust you. However, if your microcontroller only
// has a few kB of ram to begin with, setting reasonable limits for
// filenames is ok.
//
// On the other hand, using the callback interface, it is not necessary
// to set a limit on the number of files in the response.
message ListFilesRequest {
optional string path = 1 [default = "/", (nanopb).max_size = 128];
}
message FileInfo {
required uint64 inode = 1;
required string name = 2 [(nanopb).max_size = 128];
}
message ListFilesResponse {
optional bool path_error = 1 [default = false];
repeated FileInfo file = 2;
}
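The comments in the .proto above rely on nanopb's callback mechanism for the repeated *file* field; the following is a minimal sketch of what such an encode callback might look like, assuming the generated names *FileInfo_fields* and the usual generated struct layout (the three-entry loop stands in for a real directory walk)::

    #include <string.h>
    #include <pb_encode.h>
    #include "fileproto.pb.h"

    /* Hypothetical encode callback for the repeated 'file' field: emits one
     * FileInfo submessage per directory entry, so the whole listing never
     * needs to fit in RAM at once. */
    static bool write_file_list(pb_ostream_t *stream, const pb_field_t *field, void * const *arg)
    {
        FileInfo info = {0, ""};
        int i;

        (void)arg;

        for (i = 0; i < 3; i++) /* stand-in for a real readdir() loop */
        {
            info.inode = 1000 + i;
            strcpy(info.name, "example.txt");

            /* Each repeated entry is written as its tag plus a delimited submessage. */
            if (!pb_encode_tag_for_field(stream, field))
                return false;
            if (!pb_encode_submessage(stream, FileInfo_fields, &info))
                return false;
        }
        return true;
    }

A server using this would point the generated callback field at it, e.g. *response.file.funcs.encode = &write_file_list;*, before calling *pb_encode()*.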


@@ -0,0 +1,22 @@
CFLAGS=-Wall -Werror -I .. -g -O0
DEPS=double_conversion.c ../pb_decode.c ../pb_decode.h ../pb_encode.c ../pb_encode.h ../pb.h
all: run_tests
clean:
rm -f test_conversions encode_double decode_double doubleproto.pb.c doubleproto.pb.h
test_conversions: test_conversions.c double_conversion.c
$(CC) $(CFLAGS) -o $@ $^
%: %.c $(DEPS) doubleproto.pb.h doubleproto.pb.c
$(CC) $(CFLAGS) -o $@ $< double_conversion.c ../pb_decode.c ../pb_encode.c doubleproto.pb.c
doubleproto.pb.c doubleproto.pb.h: doubleproto.proto ../generator/nanopb_generator.py
protoc -I. -I../generator -I/usr/include -odoubleproto.pb $<
python ../generator/nanopb_generator.py doubleproto.pb
run_tests: test_conversions encode_double decode_double
./test_conversions
./encode_double | ./decode_double


@@ -1,6 +1,3 @@
Nanopb example "using_double_on_avr"
====================================
Some processors/compilers, such as AVR-GCC, do not support the double
datatype. Instead, they have sizeof(double) == 4. Because protocol
binary format uses the double encoding directly, this causes trouble
@@ -12,7 +9,7 @@ platforms. The file double_conversion.c provides functions that
convert these values to/from floats, without relying on compiler
support.
-To use this method, you need to make some modifications to your code:
+To use this method, you need to make two modifications to your code:
1) Change all 'double' fields into 'fixed64' in the .proto.
@@ -20,6 +17,6 @@ To use this method, you need to make some modifications to your code:
3) Whenever reading a 'double' field, use double_to_float().
-The conversion routines are as accurate as the float datatype can
+The conversion routines should be as accurate as the float datatype can
be. Furthermore, they should handle all special values (NaN, inf, denormalized
numbers) correctly. There are testcases in test_conversions.c.
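To make the workflow above concrete, here is a small sketch of the read path, assuming a generated message with a *fixed64* field named *value*; the header name and the exact signature of *double_to_float()* (and its assumed write-side counterpart *float_to_double()*) come from double_conversion.c and are assumptions here::

    #include <pb_decode.h>
    #include "doubleproto.pb.h"     /* hypothetical generated header */
    #include "double_conversion.h"  /* assumed header exposing double_to_float() */

    /* Decode a message whose 'double' field was declared as fixed64 and
     * convert the raw 64-bit value to a native float on the AVR side. */
    bool read_value(pb_istream_t *stream, float *out)
    {
        AvrDoubleMessage msg; /* hypothetical message with a uint64_t 'value' field */

        if (!pb_decode(stream, AvrDoubleMessage_fields, &msg))
            return false;

        *out = double_to_float(msg.value); /* assumed: float double_to_float(uint64_t) */
        return true;
    }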

example_unions/Makefile

@@ -0,0 +1,17 @@
CFLAGS=-ansi -Wall -Werror -I .. -g -O0
DEPS=../pb_decode.c ../pb_decode.h ../pb_encode.c ../pb_encode.h ../pb.h
all: encode decode
./encode 1 | ./decode
./encode 2 | ./decode
./encode 3 | ./decode
clean:
rm -f encode unionproto.pb.h unionproto.pb.c
%: %.c $(DEPS) unionproto.pb.h unionproto.pb.c
$(CC) $(CFLAGS) -o $@ $< ../pb_decode.c ../pb_encode.c unionproto.pb.c
unionproto.pb.h unionproto.pb.c: unionproto.proto ../generator/nanopb_generator.py
protoc -I. -I../generator -I/usr/include -ounionproto.pb $<
python ../generator/nanopb_generator.py unionproto.pb


@@ -1,19 +0,0 @@
CFLAGS = -ansi -Wall -Werror -g -O0
# Path to the nanopb root folder
NANOPB_DIR = ../..
DEPS = $(NANOPB_DIR)/pb_decode.c $(NANOPB_DIR)/pb_decode.h \
$(NANOPB_DIR)/pb_encode.c $(NANOPB_DIR)/pb_encode.h $(NANOPB_DIR)/pb.h
CFLAGS += -I$(NANOPB_DIR)
all: server client
clean:
rm -f server client fileproto.pb.c fileproto.pb.h
%: %.c $(DEPS) fileproto.pb.h fileproto.pb.c
$(CC) $(CFLAGS) -o $@ $< $(NANOPB_DIR)/pb_decode.c $(NANOPB_DIR)/pb_encode.c fileproto.pb.c common.c
fileproto.pb.c fileproto.pb.h: fileproto.proto $(NANOPB_DIR)/generator/nanopb_generator.py
protoc -ofileproto.pb $<
python $(NANOPB_DIR)/generator/nanopb_generator.py fileproto.pb


@@ -1,60 +0,0 @@
Nanopb example "network_server"
===============================
This example demonstrates the use of nanopb to communicate over network
connections. It consists of a server that sends file listings, and of
a client that requests the file list from the server.
Example usage
-------------
user@host:~/nanopb/examples/network_server$ make # Build the example
protoc -ofileproto.pb fileproto.proto
python ../../generator/nanopb_generator.py fileproto.pb
Writing to fileproto.pb.h and fileproto.pb.c
cc -ansi -Wall -Werror -I .. -g -O0 -I../.. -o server server.c
../../pb_decode.c ../../pb_encode.c fileproto.pb.c common.c
cc -ansi -Wall -Werror -I .. -g -O0 -I../.. -o client client.c
../../pb_decode.c ../../pb_encode.c fileproto.pb.c common.c
user@host:~/nanopb/examples/network_server$ ./server & # Start the server on background
[1] 24462
petteri@oddish:~/nanopb/examples/network_server$ ./client /bin # Request the server to list /bin
Got connection.
Listing directory: /bin
1327119 bzdiff
1327126 bzless
1327147 ps
1327178 ntfsmove
1327271 mv
1327187 mount
1327259 false
1327266 tempfile
1327285 zfgrep
1327165 gzexe
1327204 nc.openbsd
1327260 uname
Details of implementation
-------------------------
fileproto.proto contains the portable Google Protocol Buffers protocol definition.
It could be used as-is to implement a server or a client in any other language, for
example Python or Java.
fileproto.options contains the nanopb-specific options for the protocol file. This
sets the amount of space allocated for file names when decoding messages.
common.c/h contains functions that allow nanopb to read and write directly from
network socket. This way there is no need to allocate a separate buffer to store
the message.
server.c contains the code to open a listening socket, to respond to clients and
to list directory contents.
client.c contains the code to connect to a server, to send a request and to print
the response message.
The code is implemented using the POSIX socket api, but it should be easy enough
to port into any other socket api, such as lwip.
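common.c itself is not shown in this diff; the following is a minimal sketch of the idea it describes, i.e. a nanopb output stream whose callback writes straight to a POSIX socket. The function names are illustrative, only the pb_ostream_t members come from nanopb itself::

    #include <stdint.h>
    #include <pb_encode.h>
    #include <sys/socket.h>

    /* Push encoded bytes straight to a socket instead of buffering the
     * whole message; the socket descriptor travels in the stream state. */
    static bool socket_write_callback(pb_ostream_t *stream, const uint8_t *buf, size_t count)
    {
        int fd = (int)(intptr_t)stream->state;
        return send(fd, buf, count, 0) == (ssize_t)count;
    }

    /* Build an output stream tied to an already connected socket. */
    static pb_ostream_t ostream_from_socket(int fd)
    {
        pb_ostream_t stream = {socket_write_callback, (void*)(intptr_t)fd, SIZE_MAX, 0};
        return stream;
    }

A matching input stream would do the same in a pb_istream_t callback that calls recv() for exactly the number of bytes nanopb asks for.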


@@ -1,13 +0,0 @@
# This file defines the nanopb-specific options for the messages defined
# in fileproto.proto.
#
# If you come from high-level programming background, the hardcoded
# maximum lengths may disgust you. However, if your microcontroller only
# has a few kB of ram to begin with, setting reasonable limits for
# filenames is ok.
#
# On the other hand, using the callback interface, it is not necessary
# to set a limit on the number of files in the response.
ListFilesRequest.path max_size:128
FileInfo.name max_size:128


@@ -1,18 +0,0 @@
// This defines protocol for a simple server that lists files.
//
// See also the nanopb-specific options in fileproto.options.
message ListFilesRequest {
optional string path = 1 [default = "/"];
}
message FileInfo {
required uint64 inode = 1;
required string name = 2;
}
message ListFilesResponse {
optional bool path_error = 1 [default = false];
repeated FileInfo file = 2;
}


@@ -1,22 +0,0 @@
# Compiler flags to enable all warnings & debug info
CFLAGS = -Wall -Werror -g -O0
# Path to the nanopb root folder
NANOPB_DIR = ../..
CFLAGS += -I$(NANOPB_DIR)
# C source code files that are required
CSRC = simple.c # The main program
CSRC += simple.pb.c # The compiled protocol definition
CSRC += $(NANOPB_DIR)/pb_encode.c # The nanopb encoder
CSRC += $(NANOPB_DIR)/pb_decode.c # The nanopb decoder
# Build rule for the main program
simple: $(CSRC)
$(CC) $(CFLAGS) -osimple $(CSRC)
# Build rule for the protocol
simple.pb.c: simple.proto
protoc -osimple.pb simple.proto
python $(NANOPB_DIR)/generator/nanopb_generator.py simple.pb


@@ -1,30 +0,0 @@
Nanopb example "simple"
=======================
This example demonstrates the very basic use of nanopb. It encodes and
decodes a simple message.
The code uses four different API functions:
* pb_ostream_from_buffer() to declare the output buffer that is to be used
* pb_encode() to encode a message
* pb_istream_from_buffer() to declare the input buffer that is to be used
* pb_decode() to decode a message
Example usage
-------------
On Linux, simply type "make" to build the example. After that, you can
run it with the command: ./simple
On other platforms, you first have to compile the protocol definition using
the following two commands::
protoc -osimple.pb simple.proto
python nanopb_generator.py simple.pb
After that, add the following four files to your project and compile:
simple.c simple.pb.c pb_encode.c pb_decode.c


@@ -1,68 +0,0 @@
#include <stdio.h>
#include <pb_encode.h>
#include <pb_decode.h>
#include "simple.pb.h"
int main()
{
/* This is the buffer where we will store our message. */
uint8_t buffer[128];
size_t message_length;
bool status;
/* Encode our message */
{
/* Allocate space on the stack to store the message data.
*
* Nanopb generates simple struct definitions for all the messages.
* - check out the contents of simple.pb.h! */
SimpleMessage message;
/* Create a stream that will write to our buffer. */
pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
/* Fill in the lucky number */
message.lucky_number = 13;
/* Now we are ready to encode the message! */
status = pb_encode(&stream, SimpleMessage_fields, &message);
message_length = stream.bytes_written;
/* Then just check for any errors.. */
if (!status)
{
printf("Encoding failed: %s\n", PB_GET_ERROR(&stream));
return 1;
}
}
/* Now we could transmit the message over network, store it in a file or
* wrap it to a pigeon's leg.
*/
/* But because we are lazy, we will just decode it immediately. */
{
/* Allocate space for the decoded message. */
SimpleMessage message;
/* Create a stream that reads from the buffer. */
pb_istream_t stream = pb_istream_from_buffer(buffer, message_length);
/* Now we are ready to decode the message. */
status = pb_decode(&stream, SimpleMessage_fields, &message);
/* Check for errors... */
if (!status)
{
printf("Decoding failed: %s\n", PB_GET_ERROR(&stream));
return 1;
}
/* Print the data contained in the message. */
printf("Your lucky number was %d!\n", message.lucky_number);
}
return 0;
}


@@ -1,7 +0,0 @@
// A very simple protocol definition, consisting of only
// one message.
message SimpleMessage {
required int32 lucky_number = 1;
}


@@ -1,29 +0,0 @@
CFLAGS = -Wall -Werror -g -O0
# Path to the nanopb root directory
NANOPB_DIR = ../..
DEPS = double_conversion.c $(NANOPB_DIR)/pb.h \
$(NANOPB_DIR)/pb_decode.c $(NANOPB_DIR)/pb_decode.h \
$(NANOPB_DIR)/pb_encode.c $(NANOPB_DIR)/pb_encode.h
CFLAGS += -I$(NANOPB_DIR)
all: run_tests
clean:
rm -f test_conversions encode_double decode_double doubleproto.pb.c doubleproto.pb.h
test_conversions: test_conversions.c double_conversion.c
$(CC) $(CFLAGS) -o $@ $^
%: %.c $(DEPS) doubleproto.pb.h doubleproto.pb.c
$(CC) $(CFLAGS) -o $@ $< double_conversion.c \
$(NANOPB_DIR)/pb_decode.c $(NANOPB_DIR)/pb_encode.c doubleproto.pb.c
doubleproto.pb.c doubleproto.pb.h: doubleproto.proto $(NANOPB_DIR)/generator/nanopb_generator.py
protoc -odoubleproto.pb $<
python $(NANOPB_DIR)/generator/nanopb_generator.py doubleproto.pb
run_tests: test_conversions encode_double decode_double
./test_conversions
./encode_double | ./decode_double


@@ -1,22 +0,0 @@
CFLAGS = -ansi -Wall -Werror -g -O0
# Path to the nanopb root folder
NANOPB_DIR = ../..
DEPS = $(NANOPB_DIR)/pb_decode.c $(NANOPB_DIR)/pb_decode.h \
$(NANOPB_DIR)/pb_encode.c $(NANOPB_DIR)/pb_encode.h $(NANOPB_DIR)/pb.h
CFLAGS += -I$(NANOPB_DIR)
all: encode decode
./encode 1 | ./decode
./encode 2 | ./decode
./encode 3 | ./decode
clean:
rm -f encode unionproto.pb.h unionproto.pb.c
%: %.c $(DEPS) unionproto.pb.h unionproto.pb.c
$(CC) $(CFLAGS) -o $@ $< $(NANOPB_DIR)/pb_decode.c $(NANOPB_DIR)/pb_encode.c unionproto.pb.c
unionproto.pb.h unionproto.pb.c: unionproto.proto $(NANOPB_DIR)/generator/nanopb_generator.py
protoc -ounionproto.pb $<
python $(NANOPB_DIR)/generator/nanopb_generator.py unionproto.pb


@@ -1,52 +0,0 @@
Nanopb example "using_union_messages"
=====================================
Union messages is a common technique in Google Protocol Buffers used to
represent a group of messages, only one of which is passed at a time.
It is described in Google's documentation:
https://developers.google.com/protocol-buffers/docs/techniques#union
This directory contains an example on how to encode and decode union messages
with minimal memory usage. Usually, nanopb would allocate space to store
all of the possible messages at the same time, even though at most one of
them will be used at a time.
By using some of the lower level nanopb APIs, we can manually generate the
top level message, so that we only need to allocate the one submessage that
we actually want. Similarly when decoding, we can manually read the tag of
the top level message, and only then allocate the memory for the submessage
after we already know its type.
Example usage
-------------
Type `make` to run the example. It will build it and run commands like
following:
./encode 1 | ./decode
Got MsgType1: 42
./encode 2 | ./decode
Got MsgType2: true
./encode 3 | ./decode
Got MsgType3: 3 1415
This simply demonstrates that the "decode" program has correctly identified
the type of the received message, and managed to decode it.
Details of implementation
-------------------------
unionproto.proto contains the protocol used in the example. It consists of
three messages: MsgType1, MsgType2 and MsgType3, which are collected together
into UnionMessage.
encode.c takes one command line argument, which should be a number 1-3. It
then fills in and encodes the corresponding message, and writes it to stdout.
decode.c reads a UnionMessage from stdin. Then it calls the function
decode_unionmessage_type() to determine the type of the message. After that,
the corresponding message is decoded and the contents of it printed to the
screen.
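As a rough sketch of the tag-peeking step described above (not the exact code in decode.c), the decoder can look at the top-level tag with pb_decode_tag() before committing any storage; treating the submessage tags as 1-3 follows from the protocol description::

    #include <pb_decode.h>

    /* Peek the tag of the first field in a UnionMessage-style wrapper.
     * Returns 0 on error; otherwise the field tag (assumed to be 1, 2 or 3). */
    static uint32_t peek_union_tag(pb_istream_t *stream)
    {
        pb_wire_type_t wire_type;
        uint32_t tag;
        bool eof;

        if (!pb_decode_tag(stream, &wire_type, &tag, &eof) || eof)
            return 0;

        /* Each submessage field is length-delimited on the wire. */
        if (wire_type != PB_WT_STRING)
            return 0;

        return tag;
    }

After that, the caller would open the submessage with pb_make_string_substream() and decode only the one MsgType struct that matches the returned tag.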


@@ -1,5 +1,2 @@
nanopb_pb2.py: nanopb.proto
protoc --python_out=. -I /usr/include -I . nanopb.proto
plugin_pb2.py: plugin.proto
protoc --python_out=. -I /usr/include -I . plugin.proto


@@ -33,8 +33,6 @@ message NanoPBOptions {
optional bool long_names = 4 [default = true];
// Add 'packed' attribute to generated structs.
// Note: this cannot be used on CPUs that break on unaligned
// accesses to variables.
optional bool packed_struct = 5 [default = false];
}

generator/nanopb_generator.py Executable file → Normal file

@@ -1,7 +1,5 @@
#!/usr/bin/python
'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
-nanopb_version = "nanopb-0.2.3"
+nanopb_version = "nanopb-0.2.1-dev"
try:
import google.protobuf.descriptor_pb2 as descriptor
@@ -237,20 +235,14 @@ class Field:
else:
return 'const %s %s_default%s = %s;' % (ctype, self.struct_name + self.name, array_decl, default)
def tags(self):
'''Return the #define for the tag number of this field.'''
identifier = '%s_%s_tag' % (self.struct_name, self.name)
return '#define %-40s %d\n' % (identifier, self.tag)
def pb_field_t(self, prev_field_name):
'''Return the pb_field_t initializer to use in the constant array.
prev_field_name is the name of the previous field or None.
'''
-result = ' PB_FIELD2(%3d, ' % self.tag
+result = ' PB_FIELD(%3d, ' % self.tag
result += '%-8s, ' % self.pbtype
result += '%s, ' % self.rules
result += '%s, ' % self.allocation
result += '%s, ' % ("FIRST" if not prev_field_name else "OTHER")
result += '%s, ' % self.struct_name
result += '%s, ' % self.name
result += '%s, ' % (prev_field_name or self.name)
@@ -278,72 +270,8 @@ class Field:
return max(self.tag, self.max_size, self.max_count)
class ExtensionRange(Field):
def __init__(self, struct_name, range_start, field_options):
'''Implements a special pb_extension_t* field in an extensible message
structure. The range_start signifies the index at which the extensions
start. Not necessarily all tags above this are extensions, it is merely
a speed optimization.
'''
self.tag = range_start
self.struct_name = struct_name
self.name = 'extensions'
self.pbtype = 'EXTENSION'
self.rules = 'OPTIONAL'
self.allocation = 'CALLBACK'
self.ctype = 'pb_extension_t'
self.array_decl = ''
self.default = None
self.max_size = 0
self.max_count = 0
def __str__(self):
return ' pb_extension_t *extensions;'
def types(self):
return None
def tags(self):
return ''
class ExtensionField(Field):
def __init__(self, struct_name, desc, field_options):
self.fullname = struct_name + desc.name
self.extendee_name = names_from_type_name(desc.extendee)
Field.__init__(self, self.fullname + 'struct', desc, field_options)
if self.rules != 'OPTIONAL':
self.skip = True
else:
self.skip = False
self.rules = 'OPTEXT'
def extension_decl(self):
'''Declaration of the extension type in the .pb.h file'''
if self.skip:
msg = '/* Extension field %s was skipped because only "optional"\n' % self.fullname
msg +=' type of extension fields is currently supported. */\n'
return msg
return 'extern const pb_extension_type_t %s;\n' % self.fullname
def extension_def(self):
'''Definition of the extension type in the .pb.c file'''
if self.skip:
return ''
result = 'typedef struct {\n'
result += str(self)
result += '\n} %s;\n\n' % self.struct_name
result += ('static const pb_field_t %s_field = \n %s;\n\n' %
(self.fullname, self.pb_field_t(None)))
result += 'const pb_extension_type_t %s = {\n' % self.fullname
result += ' NULL,\n'
result += ' NULL,\n'
result += ' &%s_field\n' % self.fullname
result += '};\n'
return result
# ---------------------------------------------------------------------------
@@ -361,12 +289,6 @@ class Message:
if field_options.type != nanopb_pb2.FT_IGNORE:
self.fields.append(Field(self.name, f, field_options))
if len(desc.extension_range) > 0:
field_options = get_nanopb_suboptions(desc, message_options, self.name + 'extensions')
range_start = min([r.start for r in desc.extension_range])
if field_options.type != nanopb_pb2.FT_IGNORE:
self.fields.append(ExtensionRange(self.name, range_start, field_options))
self.packed = message_options.packed_struct
self.ordered_fields = self.fields[:]
self.ordered_fields.sort()
@@ -431,6 +353,9 @@ class Message:
# ---------------------------------------------------------------------------
# Processing of entire .proto files
# ---------------------------------------------------------------------------
@@ -450,23 +375,11 @@ def iterate_messages(desc, names = Names()):
for x in iterate_messages(submsg, sub_names):
yield x
def iterate_extensions(desc, names = Names()):
'''Recursively find all extensions.
For each, yield name, FieldDescriptorProto.
'''
for extension in desc.extension:
yield names, extension
for subname, subdesc in iterate_messages(desc, names):
for extension in subdesc.extension:
yield subname, extension
def parse_file(fdesc, file_options):
-'''Takes a FileDescriptorProto and returns tuple (enums, messages, extensions).'''
+'''Takes a FileDescriptorProto and returns tuple (enum, messages).'''
enums = []
messages = []
extensions = []
if fdesc.package:
base_name = Names(fdesc.package.split('.'))
@@ -484,11 +397,6 @@ def parse_file(fdesc, file_options):
enum_options = get_nanopb_suboptions(enum, message_options, names + enum.name)
enums.append(Enum(names, enum, enum_options))
for names, extension in iterate_extensions(fdesc, base_name):
field_options = get_nanopb_suboptions(extension, file_options, names)
if field_options.type != nanopb_pb2.FT_IGNORE:
extensions.append(ExtensionField(names, extension, field_options))
# Fix field default values where enum short names are used.
for enum in enums:
if not enum.options.long_names:
@@ -498,7 +406,7 @@ def parse_file(fdesc, file_options):
idx = enum.value_longnames.index(field.default)
field.default = enum.values[idx][0]
-return enums, messages, extensions
+return enums, messages
def toposort2(data):
'''Topological sort.
@@ -541,7 +449,7 @@ def make_identifier(headername):
result += '_'
return result
-def generate_header(dependencies, headername, enums, messages, extensions, options):
+def generate_header(dependencies, headername, enums, messages, options):
'''Generate content for a header file.
Generates strings, which should be concatenated and stored to file.
'''
@@ -577,23 +485,11 @@ def generate_header(dependencies, headername, enums, messages, extensions, optio
yield msg.types()
yield str(msg) + '\n\n'
if extensions:
yield '/* Extensions */\n'
for extension in extensions:
yield extension.extension_decl()
yield '\n'
yield '/* Default values for struct fields */\n'
for msg in messages:
yield msg.default_decl(True)
yield '\n'
yield '/* Field tags (for use in manual encoding/decoding) */\n'
for msg in sort_dependencies(messages):
for field in msg.fields:
yield field.tags()
yield '\n'
yield '/* Struct field encoding specification for nanopb */\n'
for msg in messages:
yield msg.fields_declaration() + '\n'
@@ -605,7 +501,7 @@ def generate_header(dependencies, headername, enums, messages, extensions, optio
# End of header
yield '\n#endif\n'
-def generate_source(headername, enums, messages, extensions, options):
+def generate_source(headername, enums, messages):
'''Generate content for a source file.'''
yield '/* Automatically generated nanopb constant definitions */\n'
@@ -621,10 +517,6 @@ def generate_source(headername, enums, messages, extensions, options):
for msg in messages:
yield msg.fields_definition() + '\n\n'
for ext in extensions:
yield ext.extension_def() + '\n'
# Add checks for numeric limits
if messages:
count_required_fields = lambda m: len([f for f in msg.fields if f.rules == 'REQUIRED'])
largest_msg = max(messages, key = count_required_fields)
@@ -636,6 +528,7 @@ def generate_source(headername, enums, messages, extensions, options):
yield ' setting PB_MAX_REQUIRED_FIELDS to %d or more.\n' % largest_count
yield '#endif\n'
# Add checks for numeric limits
worst = 0
worst_field = ''
checks = []
@@ -783,126 +676,72 @@ optparser.add_option("-v", "--verbose", dest="verbose", action="store_true", def
optparser.add_option("-s", dest="settings", metavar="OPTION:VALUE", action="append", default=[],
help="Set generator option (max_size, max_count etc.).")
def process_file(filename, fdesc, options): def process(filenames, options):
'''Process a single file. '''Process the files given on the command line.'''
filename: The full path to the .proto or .pb source file, as string.
fdesc: The loaded FileDescriptorSet, or None to read from the input file.
options: Command line options as they come from OptionsParser.
Returns a dict:
{'headername': Name of header file,
'headerdata': Data for the .h header file,
'sourcename': Name of the source code file,
'sourcedata': Data for the .c source code file
}
'''
toplevel_options = nanopb_pb2.NanoPBOptions()
for s in options.settings:
text_format.Merge(s, toplevel_options)
if not fdesc:
data = open(filename, 'rb').read()
fdesc = descriptor.FileDescriptorSet.FromString(data).file[0]
# Check if there is a separate .options file
try:
optfilename = options.options_file % os.path.splitext(filename)[0]
except TypeError:
# No %s specified, use the filename as-is
optfilename = options.options_file
if options.verbose:
print 'Reading options from ' + optfilename
if os.path.isfile(optfilename):
Globals.separate_options = read_options_file(open(optfilename, "rU"))
else:
Globals.separate_options = []
# Parse the file
file_options = get_nanopb_suboptions(fdesc, toplevel_options, Names([filename]))
enums, messages, extensions = parse_file(fdesc, file_options)
# Decide the file names
noext = os.path.splitext(filename)[0]
headername = noext + '.' + options.extension + '.h'
sourcename = noext + '.' + options.extension + '.c'
headerbasename = os.path.basename(headername)
# List of .proto files that should not be included in the C header file
# even if they are mentioned in the source .proto.
excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
dependencies = [d for d in fdesc.dependency if d not in excludes]
headerdata = ''.join(generate_header(dependencies, headerbasename, enums,
messages, extensions, options))
sourcedata = ''.join(generate_source(headerbasename, enums,
messages, extensions, options))
return {'headername': headername, 'headerdata': headerdata,
'sourcename': sourcename, 'sourcedata': sourcedata}
def main_cli():
'''Main function when invoked directly from the command line.'''
options, filenames = optparser.parse_args()
if not filenames: if not filenames:
optparser.print_help() optparser.print_help()
sys.exit(1) return False
if options.quiet: if options.quiet:
options.verbose = False options.verbose = False
Globals.verbose_options = options.verbose Globals.verbose_options = options.verbose
toplevel_options = nanopb_pb2.NanoPBOptions()
for s in options.settings:
text_format.Merge(s, toplevel_options)
for filename in filenames: for filename in filenames:
results = process_file(filename, None, options) data = open(filename, 'rb').read()
fdesc = descriptor.FileDescriptorSet.FromString(data)
# Check if any separate options are specified
try:
optfilename = options.options_file % os.path.splitext(filename)[0]
except TypeError:
# No %s specified, use the filename as-is
optfilename = options.options_file
if options.verbose:
print 'Reading options from ' + optfilename
if os.path.isfile(optfilename):
Globals.separate_options = read_options_file(open(optfilename, "rU"))
else:
Globals.separate_options = []
# Parse the file
file_options = get_nanopb_suboptions(fdesc.file[0], toplevel_options, Names([filename]))
enums, messages = parse_file(fdesc.file[0], file_options)
noext = os.path.splitext(filename)[0]
headername = noext + '.' + options.extension + '.h'
sourcename = noext + '.' + options.extension + '.c'
headerbasename = os.path.basename(headername)
if not options.quiet: if not options.quiet:
print "Writing to " + results['headername'] + " and " + results['sourcename'] print "Writing to " + headername + " and " + sourcename
open(results['headername'], 'w').write(results['headerdata']) # List of .proto files that should not be included in the C header file
open(results['sourcename'], 'w').write(results['sourcedata']) # even if they are mentioned in the source .proto.
excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
dependencies = [d for d in fdesc.file[0].dependency if d not in excludes]
def main_plugin(): header = open(headername, 'w')
'''Main function when invoked as a protoc plugin.''' for part in generate_header(dependencies, headerbasename, enums, messages, options):
header.write(part)
import plugin_pb2 source = open(sourcename, 'w')
data = sys.stdin.read() for part in generate_source(headerbasename, enums, messages):
request = plugin_pb2.CodeGeneratorRequest.FromString(data) source.write(part)
import shlex return True
args = shlex.split(request.parameter)
options, dummy = optparser.parse_args(args)
# We can't go printing stuff to stdout
Globals.verbose_options = False
options.verbose = False
options.quiet = True
response = plugin_pb2.CodeGeneratorResponse()
for filename in request.file_to_generate:
for fdesc in request.proto_file:
if fdesc.name == filename:
results = process_file(filename, fdesc, options)
f = response.file.add()
f.name = results['headername']
f.content = results['headerdata']
f = response.file.add()
f.name = results['sourcename']
f.content = results['sourcedata']
sys.stdout.write(response.SerializeToString())
if __name__ == '__main__': if __name__ == '__main__':
# Check if we are running as a plugin under protoc options, filenames = optparser.parse_args()
if 'protoc-gen-' in sys.argv[0]: status = process(filenames, options)
main_plugin()
else: if not status:
main_cli() sys.exit(1)

View File

@@ -1,145 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
//
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
// change.
//
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
// just a program that reads a CodeGeneratorRequest from stdin and writes a
// CodeGeneratorResponse to stdout.
//
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
// of dealing with the raw protocol defined here.
//
// A plugin executable needs only to be placed somewhere in the path. The
// plugin should be named "protoc-gen-$NAME", and will then be used when the
// flag "--${NAME}_out" is passed to protoc.
package google.protobuf.compiler;
import "google/protobuf/descriptor.proto";
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
message CodeGeneratorRequest {
// The .proto files that were explicitly listed on the command-line. The
// code generator should generate code only for these files. Each file's
// descriptor will be included in proto_file, below.
repeated string file_to_generate = 1;
// The generator parameter passed on the command-line.
optional string parameter = 2;
// FileDescriptorProtos for all files in files_to_generate and everything
// they import. The files will appear in topological order, so each file
// appears before any file that imports it.
//
// protoc guarantees that all proto_files will be written after
// the fields above, even though this is not technically guaranteed by the
// protobuf wire format. This theoretically could allow a plugin to stream
// in the FileDescriptorProtos and handle them one by one rather than read
// the entire set into memory at once. However, as of this writing, this
// is not similarly optimized on protoc's end -- it will store all fields in
// memory at once before sending them to the plugin.
repeated FileDescriptorProto proto_file = 15;
}
// The plugin writes an encoded CodeGeneratorResponse to stdout.
message CodeGeneratorResponse {
// Error message. If non-empty, code generation failed. The plugin process
// should exit with status code zero even if it reports an error in this way.
//
// This should be used to indicate errors in .proto files which prevent the
// code generator from generating correct code. Errors which indicate a
// problem in protoc itself -- such as the input CodeGeneratorRequest being
// unparseable -- should be reported by writing a message to stderr and
// exiting with a non-zero status code.
optional string error = 1;
// Represents a single generated file.
message File {
// The file name, relative to the output directory. The name must not
// contain "." or ".." components and must be relative, not be absolute (so,
// the file cannot lie outside the output directory). "/" must be used as
// the path separator, not "\".
//
// If the name is omitted, the content will be appended to the previous
// file. This allows the generator to break large files into small chunks,
// and allows the generated text to be streamed back to protoc so that large
// files need not reside completely in memory at one time. Note that as of
// this writing protoc does not optimize for this -- it will read the entire
// CodeGeneratorResponse before writing files to disk.
optional string name = 1;
// If non-empty, indicates that the named file should already exist, and the
// content here is to be inserted into that file at a defined insertion
// point. This feature allows a code generator to extend the output
// produced by another code generator. The original generator may provide
// insertion points by placing special annotations in the file that look
// like:
// @@protoc_insertion_point(NAME)
// The annotation can have arbitrary text before and after it on the line,
// which allows it to be placed in a comment. NAME should be replaced with
// an identifier naming the point -- this is what other generators will use
// as the insertion_point. Code inserted at this point will be placed
// immediately above the line containing the insertion point (thus multiple
// insertions to the same point will come out in the order they were added).
// The double-@ is intended to make it unlikely that the generated code
// could contain things that look like insertion points by accident.
//
// For example, the C++ code generator places the following line in the
// .pb.h files that it generates:
// // @@protoc_insertion_point(namespace_scope)
// This line appears within the scope of the file's package namespace, but
// outside of any particular class. Another plugin can then specify the
// insertion_point "namespace_scope" to generate additional classes or
// other declarations that should be placed in this scope.
//
// Note that if the line containing the insertion point begins with
// whitespace, the same whitespace will be added to every line of the
// inserted text. This is useful for languages like Python, where
// indentation matters. In these languages, the insertion point comment
// should be indented the same amount as any inserted code will need to be
// in order to work correctly in that context.
//
// The code generator that generates the initial file and the one which
// inserts into it must both run as part of a single invocation of protoc.
// Code generators are executed in the order in which they appear on the
// command line.
//
// If |insertion_point| is present, |name| must also be present.
optional string insertion_point = 2;
// The file contents.
optional string content = 15;
}
repeated File file = 15;
}

View File

@@ -1,161 +0,0 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import google.protobuf.descriptor_pb2
DESCRIPTOR = descriptor.FileDescriptor(
name='plugin.proto',
package='google.protobuf.compiler',
serialized_pb='\n\x0cplugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"}\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xaa\x01\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a>\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t')
_CODEGENERATORREQUEST = descriptor.Descriptor(
name='CodeGeneratorRequest',
full_name='google.protobuf.compiler.CodeGeneratorRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='file_to_generate', full_name='google.protobuf.compiler.CodeGeneratorRequest.file_to_generate', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='parameter', full_name='google.protobuf.compiler.CodeGeneratorRequest.parameter', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='proto_file', full_name='google.protobuf.compiler.CodeGeneratorRequest.proto_file', index=2,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=76,
serialized_end=201,
)
_CODEGENERATORRESPONSE_FILE = descriptor.Descriptor(
name='File',
full_name='google.protobuf.compiler.CodeGeneratorResponse.File',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='insertion_point', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='content', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.content', index=2,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=312,
serialized_end=374,
)
_CODEGENERATORRESPONSE = descriptor.Descriptor(
name='CodeGeneratorResponse',
full_name='google.protobuf.compiler.CodeGeneratorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='error', full_name='google.protobuf.compiler.CodeGeneratorResponse.error', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='file', full_name='google.protobuf.compiler.CodeGeneratorResponse.file', index=1,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CODEGENERATORRESPONSE_FILE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=204,
serialized_end=374,
)
_CODEGENERATORREQUEST.fields_by_name['proto_file'].message_type = google.protobuf.descriptor_pb2._FILEDESCRIPTORPROTO
_CODEGENERATORRESPONSE_FILE.containing_type = _CODEGENERATORRESPONSE;
_CODEGENERATORRESPONSE.fields_by_name['file'].message_type = _CODEGENERATORRESPONSE_FILE
DESCRIPTOR.message_types_by_name['CodeGeneratorRequest'] = _CODEGENERATORREQUEST
DESCRIPTOR.message_types_by_name['CodeGeneratorResponse'] = _CODEGENERATORRESPONSE
class CodeGeneratorRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CODEGENERATORREQUEST
# @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest)
class CodeGeneratorResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class File(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CODEGENERATORRESPONSE_FILE
# @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File)
DESCRIPTOR = _CODEGENERATORRESPONSE
# @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse)
# @@protoc_insertion_point(module_scope)

214
pb.h
View File

@@ -1,61 +1,13 @@
/* Common parts of the nanopb library. Most of these are quite low-level
* stuff. For the high-level interface, see pb_encode.h and pb_decode.h.
*/
#ifndef _PB_H_ #ifndef _PB_H_
#define _PB_H_ #define _PB_H_
/***************************************************************** /* pb.h: Common parts for nanopb library.
* Nanopb compilation time options. You can change these here by * * Most of these are quite low-level stuff. For the high-level interface,
* uncommenting the lines, or on the compiler command line. * * see pb_encode.h or pb_decode.h
*****************************************************************/
/* Define this if your CPU architecture is big endian, i.e. it
* stores the most-significant byte first. */
/* #define __BIG_ENDIAN__ 1 */
/* Increase the number of required fields that are tracked.
* A compiler warning will tell if you need this. */
/* #define PB_MAX_REQUIRED_FIELDS 256 */
/* Add support for tag numbers > 255 and fields larger than 255 bytes. */
/* #define PB_FIELD_16BIT 1 */
/* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */
/* #define PB_FIELD_32BIT 1 */
/* Disable support for error messages in order to save some code space. */
/* #define PB_NO_ERRMSG 1 */
/* Disable support for custom streams (support only memory buffers). */
/* #define PB_BUFFER_ONLY 1 */
/* Switch back to the old-style callback function signature.
* This was the default until nanopb-0.2.1. */
/* #define PB_OLD_CALLBACK_STYLE */
/******************************************************************
* You usually don't need to change anything below this line. *
* Feel free to look around and use the defined macros, though. *
******************************************************************/
/* Version of the nanopb library. Just in case you want to check it in
* your own program. */
#define NANOPB_VERSION nanopb-0.2.3
/* Include all the system headers needed by nanopb. You will need the
* definitions of the following:
* - strlen, memcpy, memset functions
* - [u]int8_t, [u]int16_t, [u]int32_t, [u]int64_t
* - size_t
* - bool
*
* If you don't have the standard header files, you can instead provide
* a custom header that defines or includes all this. In that case,
* define PB_SYSTEM_HEADER to the path of this file.
*/ */
#define NANOPB_VERSION nanopb-0.2.1-dev
#ifdef PB_SYSTEM_HEADER #ifdef PB_SYSTEM_HEADER
#include PB_SYSTEM_HEADER #include PB_SYSTEM_HEADER
#else #else
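The compile-time options listed at the top of this hunk are meant to be toggled either by editing pb.h or by passing the corresponding -D flags. Below is a minimal sketch of overriding a few of them ahead of the include; the chosen values are illustrative only, and command-line -D flags are the safer route since every translation unit that includes pb.h must agree on them.

    /* Sketch: override selected nanopb options before including pb.h.
     * Equivalent to -DPB_FIELD_16BIT=1 -DPB_NO_ERRMSG=1 -DPB_MAX_REQUIRED_FIELDS=128 */
    #define PB_FIELD_16BIT 1            /* tags > 255 and fields larger than 255 bytes */
    #define PB_NO_ERRMSG 1              /* drop error strings to save code space */
    #define PB_MAX_REQUIRED_FIELDS 128  /* track more required fields than the default 64 */
    #include "pb.h"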
@@ -78,7 +30,7 @@
# define PB_PACKED_STRUCT_START _Pragma("pack(push, 1)") # define PB_PACKED_STRUCT_START _Pragma("pack(push, 1)")
# define PB_PACKED_STRUCT_END _Pragma("pack(pop)") # define PB_PACKED_STRUCT_END _Pragma("pack(pop)")
# define pb_packed # define pb_packed
#elif defined(_MSC_VER) && (_MSC_VER >= 1500) #elif defined(_MSC_VER)
/* For Microsoft Visual C++ */ /* For Microsoft Visual C++ */
# define PB_PACKED_STRUCT_START __pragma(pack(push, 1)) # define PB_PACKED_STRUCT_START __pragma(pack(push, 1))
# define PB_PACKED_STRUCT_END __pragma(pack(pop)) # define PB_PACKED_STRUCT_END __pragma(pack(pop))
@@ -90,7 +42,7 @@
# define pb_packed # define pb_packed
#endif #endif
/* Handy macro for suppressing unreferenced-parameter compiler warnings. */ /* Handy macro for suppressing unreferenced-parameter compiler warnings. */
#ifndef UNUSED #ifndef UNUSED
#define UNUSED(x) (void)(x) #define UNUSED(x) (void)(x)
#endif #endif
@@ -104,7 +56,8 @@
#define STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) static_assertion_##MSG##LINE##COUNTER #define STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) static_assertion_##MSG##LINE##COUNTER
#endif #endif
/* Number of required fields to keep track of. */ /* Number of required fields to keep track of
* (change here or on compiler command line). */
#ifndef PB_MAX_REQUIRED_FIELDS #ifndef PB_MAX_REQUIRED_FIELDS
#define PB_MAX_REQUIRED_FIELDS 64 #define PB_MAX_REQUIRED_FIELDS 64
#endif #endif
@@ -125,7 +78,9 @@
typedef uint8_t pb_type_t; typedef uint8_t pb_type_t;
/**** Field data types ****/ /************************
* Field contents types *
************************/
/* Numeric types */ /* Numeric types */
#define PB_LTYPE_VARINT 0x00 /* int32, uint32, int64, uint64, bool, enum */ #define PB_LTYPE_VARINT 0x00 /* int32, uint32, int64, uint64, bool, enum */
@@ -148,22 +103,22 @@ typedef uint8_t pb_type_t;
* submsg_fields is pointer to field descriptions */ * submsg_fields is pointer to field descriptions */
#define PB_LTYPE_SUBMESSAGE 0x06 #define PB_LTYPE_SUBMESSAGE 0x06
/* Extension pseudo-field
* The field contains a pointer to pb_extension_t */
#define PB_LTYPE_EXTENSION 0x07
/* Number of declared LTYPES */ /* Number of declared LTYPES */
#define PB_LTYPES_COUNT 8 #define PB_LTYPES_COUNT 7
#define PB_LTYPE_MASK 0x0F #define PB_LTYPE_MASK 0x0F
/**** Field repetition rules ****/ /**************************
* Field repetition rules *
**************************/
#define PB_HTYPE_REQUIRED 0x00 #define PB_HTYPE_REQUIRED 0x00
#define PB_HTYPE_OPTIONAL 0x10 #define PB_HTYPE_OPTIONAL 0x10
#define PB_HTYPE_REPEATED 0x20 #define PB_HTYPE_REPEATED 0x20
#define PB_HTYPE_MASK 0x30 #define PB_HTYPE_MASK 0x30
/**** Field allocation types ****/ /********************
* Allocation types *
********************/
#define PB_ATYPE_STATIC 0x00 #define PB_ATYPE_STATIC 0x00
#define PB_ATYPE_CALLBACK 0x40 #define PB_ATYPE_CALLBACK 0x40
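The LTYPE, HTYPE and ATYPE groups occupy separate bit ranges of pb_type_t, so a complete field type is one constant from each group OR'ed together, and the PB_LTYPE/PB_HTYPE/PB_ATYPE accessor macros mask the parts back out. A small illustrative sketch:

    /* Sketch: the type byte of a statically allocated, optional varint field. */
    pb_type_t t = (pb_type_t)(PB_ATYPE_STATIC | PB_HTYPE_OPTIONAL | PB_LTYPE_VARINT);
    /* PB_LTYPE(t) == PB_LTYPE_VARINT, PB_HTYPE(t) == PB_HTYPE_OPTIONAL,
     * PB_ATYPE(t) == PB_ATYPE_STATIC */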
@@ -215,17 +170,6 @@ struct _pb_field_t {
} pb_packed; } pb_packed;
PB_PACKED_STRUCT_END PB_PACKED_STRUCT_END
/* Make sure that the standard integer types are of the expected sizes.
 * All kinds of things may break otherwise, at least all the fixed* types. */
STATIC_ASSERT(sizeof(int8_t) == 1, INT8_T_WRONG_SIZE)
STATIC_ASSERT(sizeof(uint8_t) == 1, UINT8_T_WRONG_SIZE)
STATIC_ASSERT(sizeof(int16_t) == 2, INT16_T_WRONG_SIZE)
STATIC_ASSERT(sizeof(uint16_t) == 2, UINT16_T_WRONG_SIZE)
STATIC_ASSERT(sizeof(int32_t) == 4, INT32_T_WRONG_SIZE)
STATIC_ASSERT(sizeof(uint32_t) == 4, UINT32_T_WRONG_SIZE)
STATIC_ASSERT(sizeof(int64_t) == 8, INT64_T_WRONG_SIZE)
STATIC_ASSERT(sizeof(uint64_t) == 8, UINT64_T_WRONG_SIZE)
/* This structure is used for 'bytes' arrays. /* This structure is used for 'bytes' arrays.
* It has the number of bytes in the beginning, and after that an array. * It has the number of bytes in the beginning, and after that an array.
* Note that actual structs used will have a different length of bytes array. * Note that actual structs used will have a different length of bytes array.
@@ -285,117 +229,50 @@ typedef enum {
PB_WT_32BIT = 5 PB_WT_32BIT = 5
} pb_wire_type_t; } pb_wire_type_t;
/* Structure for defining the handling of unknown/extension fields.
* Usually the pb_extension_type_t structure is automatically generated,
* while the pb_extension_t structure is created by the user. However,
* if you want to catch all unknown fields, you can also create a custom
* pb_extension_type_t with your own callback.
*/
typedef struct _pb_extension_type_t pb_extension_type_t;
typedef struct _pb_extension_t pb_extension_t;
struct _pb_extension_type_t {
/* Called for each unknown field in the message.
* If you handle the field, read off all of its data and return true.
* If you do not handle the field, do not read anything and return true.
* If you run into an error, return false.
* Set to NULL for default handler.
*/
bool (*decode)(pb_istream_t *stream, pb_extension_t *extension,
uint32_t tag, pb_wire_type_t wire_type);
/* Called once after all regular fields have been encoded.
* If you have something to write, do so and return true.
* If you do not have anything to write, just return true.
* If you run into an error, return false.
* Set to NULL for default handler.
*/
bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension);
/* Free field for use by the callback. */
const void *arg;
};
struct _pb_extension_t {
/* Type describing the extension field. Usually you'll initialize
* this to a pointer to the automatically generated structure. */
const pb_extension_type_t *type;
/* Destination for the decoded data. This must match the datatype
* of the extension field. */
void *dest;
/* Pointer to the next extension handler, or NULL.
* If this extension does not match a field, the next handler is
* automatically called. */
pb_extension_t *next;
};
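The extension structures removed in the right-hand column are used by creating a pb_extension_t and chaining it into the message before decoding. A hedged sketch of a catch-all handler built from a custom pb_extension_type_t; the callback name, the message type and the 'extensions' struct member are assumptions, not taken from this diff:

    /* Sketch: discard any unknown field instead of letting the default handler run. */
    static bool my_skip_cb(pb_istream_t *stream, pb_extension_t *extension,
                           uint32_t tag, pb_wire_type_t wire_type)
    {
        (void)extension; (void)tag;
        return pb_skip_field(stream, wire_type);   /* consume the data and move on */
    }

    static const pb_extension_type_t catch_all = { &my_skip_cb, NULL, NULL };

    /* At decode time (MyMessage is a stand-in for a generated type):
     *   pb_extension_t ext = { &catch_all, NULL, NULL };
     *   msg.extensions = &ext;
     *   pb_decode(&stream, MyMessage_fields, &msg);
     */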
/* These macros are used to declare pb_field_t's in the constant array. */ /* These macros are used to declare pb_field_t's in the constant array. */
/* Size of a structure member, in bytes. */
#define pb_membersize(st, m) (sizeof ((st*)0)->m) #define pb_membersize(st, m) (sizeof ((st*)0)->m)
/* Number of entries in an array. */
#define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0])) #define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0]))
/* Delta from start of one member to the start of another member. */
#define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2)) #define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2))
/* Marks the end of the field list */ #define pb_delta_end(st, m1, m2) (int)(offsetof(st, m1) == offsetof(st, m2) \
? offsetof(st, m1) \
: offsetof(st, m1) - offsetof(st, m2) - pb_membersize(st, m2))
#define PB_LAST_FIELD {0,(pb_type_t) 0,0,0,0,0,0} #define PB_LAST_FIELD {0,(pb_type_t) 0,0,0,0,0,0}
/* Macros for filling in the data_offset field */
/* data_offset for first field in a message */
#define PB_DATAOFFSET_FIRST(st, m1, m2) (offsetof(st, m1))
/* data_offset for subsequent fields */
#define PB_DATAOFFSET_OTHER(st, m1, m2) (offsetof(st, m1) - offsetof(st, m2) - pb_membersize(st, m2))
/* Choose first/other based on m1 == m2 (deprecated, remains for backwards compatibility) */
#define PB_DATAOFFSET_CHOOSE(st, m1, m2) (int)(offsetof(st, m1) == offsetof(st, m2) \
? PB_DATAOFFSET_FIRST(st, m1, m2) \
: PB_DATAOFFSET_OTHER(st, m1, m2))
/* Required fields are the simplest. They just have delta (padding) from /* Required fields are the simplest. They just have delta (padding) from
* previous field end, and the size of the field. Pointer is used for * previous field end, and the size of the field. Pointer is used for
* submessages and default values. * submessages and default values.
*/ */
#define PB_REQUIRED_STATIC(tag, st, m, fd, ltype, ptr) \ #define PB_REQUIRED_STATIC(tag, st, m, pm, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_REQUIRED | ltype, \ {tag, PB_ATYPE_STATIC | PB_HTYPE_REQUIRED | ltype, \
fd, 0, pb_membersize(st, m), 0, ptr} pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
/* Optional fields add the delta to the has_ variable. */ /* Optional fields add the delta to the has_ variable. */
#define PB_OPTIONAL_STATIC(tag, st, m, fd, ltype, ptr) \ #define PB_OPTIONAL_STATIC(tag, st, m, pm, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_OPTIONAL | ltype, \ {tag, PB_ATYPE_STATIC | PB_HTYPE_OPTIONAL | ltype, \
fd, \ pb_delta_end(st, m, pm), \
pb_delta(st, has_ ## m, m), \ pb_delta(st, has_ ## m, m), \
pb_membersize(st, m), 0, ptr} pb_membersize(st, m), 0, ptr}
/* Repeated fields have a _count field and also the maximum number of entries. */ /* Repeated fields have a _count field and also the maximum number of entries. */
#define PB_REPEATED_STATIC(tag, st, m, fd, ltype, ptr) \ #define PB_REPEATED_STATIC(tag, st, m, pm, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_REPEATED | ltype, \ {tag, PB_ATYPE_STATIC | PB_HTYPE_REPEATED | ltype, \
fd, \ pb_delta_end(st, m, pm), \
pb_delta(st, m ## _count, m), \ pb_delta(st, m ## _count, m), \
pb_membersize(st, m[0]), \ pb_membersize(st, m[0]), \
pb_arraysize(st, m), ptr} pb_arraysize(st, m), ptr}
/* Callbacks are much like required fields except with special datatype. */ /* Callbacks are much like required fields except with special datatype. */
#define PB_REQUIRED_CALLBACK(tag, st, m, fd, ltype, ptr) \ #define PB_REQUIRED_CALLBACK(tag, st, m, pm, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_REQUIRED | ltype, \ {tag, PB_ATYPE_CALLBACK | PB_HTYPE_REQUIRED | ltype, \
fd, 0, pb_membersize(st, m), 0, ptr} pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
#define PB_OPTIONAL_CALLBACK(tag, st, m, fd, ltype, ptr) \ #define PB_OPTIONAL_CALLBACK(tag, st, m, pm, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_OPTIONAL | ltype, \ {tag, PB_ATYPE_CALLBACK | PB_HTYPE_OPTIONAL | ltype, \
fd, 0, pb_membersize(st, m), 0, ptr} pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
#define PB_REPEATED_CALLBACK(tag, st, m, fd, ltype, ptr) \ #define PB_REPEATED_CALLBACK(tag, st, m, pm, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_REPEATED | ltype, \ {tag, PB_ATYPE_CALLBACK | PB_HTYPE_REPEATED | ltype, \
fd, 0, pb_membersize(st, m), 0, ptr} pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
/* Optional extensions don't have the has_ field, as that would be redundant. */
#define PB_OPTEXT_STATIC(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_OPTIONAL | ltype, \
0, \
0, \
pb_membersize(st, m), 0, ptr}
#define PB_OPTEXT_CALLBACK(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_OPTIONAL | ltype, \
0, 0, pb_membersize(st, m), 0, ptr}
/* The mapping from protobuf types to LTYPEs is done using these macros. */ /* The mapping from protobuf types to LTYPEs is done using these macros. */
#define PB_LTYPE_MAP_BOOL PB_LTYPE_VARINT #define PB_LTYPE_MAP_BOOL PB_LTYPE_VARINT
@@ -415,14 +292,13 @@ struct _pb_extension_t {
#define PB_LTYPE_MAP_STRING PB_LTYPE_STRING #define PB_LTYPE_MAP_STRING PB_LTYPE_STRING
#define PB_LTYPE_MAP_UINT32 PB_LTYPE_VARINT #define PB_LTYPE_MAP_UINT32 PB_LTYPE_VARINT
#define PB_LTYPE_MAP_UINT64 PB_LTYPE_VARINT #define PB_LTYPE_MAP_UINT64 PB_LTYPE_VARINT
#define PB_LTYPE_MAP_EXTENSION PB_LTYPE_EXTENSION
/* This is the actual macro used in field descriptions. /* This is the actual macro used in field descriptions.
* It takes these arguments: * It takes these arguments:
* - Field tag number * - Field tag number
* - Field type: BOOL, BYTES, DOUBLE, ENUM, FIXED32, FIXED64, * - Field type: BOOL, BYTES, DOUBLE, ENUM, FIXED32, FIXED64,
* FLOAT, INT32, INT64, MESSAGE, SFIXED32, SFIXED64 * FLOAT, INT32, INT64, MESSAGE, SFIXED32, SFIXED64
* SINT32, SINT64, STRING, UINT32, UINT64 or EXTENSION * SINT32, SINT64, STRING, UINT32 or UINT64
* - Field rules: REQUIRED, OPTIONAL or REPEATED * - Field rules: REQUIRED, OPTIONAL or REPEATED
* - Allocation: STATIC or CALLBACK * - Allocation: STATIC or CALLBACK
* - Message name * - Message name
@@ -432,22 +308,8 @@ struct _pb_extension_t {
*/ */
#define PB_FIELD(tag, type, rules, allocation, message, field, prevfield, ptr) \ #define PB_FIELD(tag, type, rules, allocation, message, field, prevfield, ptr) \
PB_ ## rules ## _ ## allocation(tag, message, field, \ PB_ ## rules ## _ ## allocation(tag, message, field, prevfield, \
PB_DATAOFFSET_CHOOSE(message, field, prevfield), \ PB_LTYPE_MAP_ ## type, ptr)
PB_LTYPE_MAP_ ## type, ptr)
/* This is a new version of the macro used by the nanopb generator from
 * version 0.2.3 onwards. It avoids the use of a ternary expression in
 * the initialization, which confused some compilers.
 *
 * - Placement: FIRST or OTHER, depending on whether this is the first field in the structure.
*
*/
#define PB_FIELD2(tag, type, rules, allocation, placement, message, field, prevfield, ptr) \
PB_ ## rules ## _ ## allocation(tag, message, field, \
PB_DATAOFFSET_ ## placement(message, field, prevfield), \
PB_LTYPE_MAP_ ## type, ptr)
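Generated .pb.c files are essentially a constant pb_field_t array built from these macros. A sketch of what PB_FIELD2 output could look like for a hypothetical two-field message (names and sizes invented for illustration):

    /* Sketch only; real descriptors come from nanopb_generator.py.
     * Assumes: typedef struct { int32_t value; bool has_name; char name[16]; } ExampleMsg; */
    const pb_field_t ExampleMsg_fields[3] = {
        PB_FIELD2(1, INT32 , REQUIRED, STATIC, FIRST, ExampleMsg, value, value, 0),
        PB_FIELD2(2, STRING, OPTIONAL, STATIC, OTHER, ExampleMsg, name,  value, 0),
        PB_LAST_FIELD
    };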
/* These macros are used for giving out error messages. /* These macros are used for giving out error messages.
* They are mostly a debugging aid; the main error information * They are mostly a debugging aid; the main error information

View File

@@ -28,8 +28,7 @@ static const pb_decoder_t PB_DECODERS[PB_LTYPES_COUNT] = {
&pb_dec_bytes, &pb_dec_bytes,
&pb_dec_string, &pb_dec_string,
&pb_dec_submessage, &pb_dec_submessage
NULL /* extensions */
}; };
/************** /**************
@@ -309,12 +308,12 @@ static bool pb_field_next(pb_field_iterator_t *iter)
prev_size *= iter->pos->array_size; prev_size *= iter->pos->array_size;
} }
if (iter->pos->tag == 0)
return false; /* Only happens with empty message types */
if (PB_HTYPE(iter->pos->type) == PB_HTYPE_REQUIRED) if (PB_HTYPE(iter->pos->type) == PB_HTYPE_REQUIRED)
iter->required_field_index++; iter->required_field_index++;
if (iter->pos->tag == 0)
return false; /* Only happens with empty message types */
iter->pos++; iter->pos++;
iter->field_index++; iter->field_index++;
if (iter->pos->tag == 0) if (iter->pos->tag == 0)
@@ -332,22 +331,6 @@ static bool pb_field_next(pb_field_iterator_t *iter)
return notwrapped; return notwrapped;
} }
static bool checkreturn pb_field_find(pb_field_iterator_t *iter, uint32_t tag)
{
unsigned start = iter->field_index;
do {
if (iter->pos->tag == tag &&
PB_LTYPE(iter->pos->type) != PB_LTYPE_EXTENSION)
{
return true;
}
pb_field_next(iter);
} while (iter->field_index != start);
return false;
}
/************************* /*************************
* Decode a single field * * Decode a single field *
*************************/ *************************/
@@ -434,11 +417,11 @@ static bool checkreturn decode_callback_field(pb_istream_t *stream, pb_wire_type
if (!pb_make_string_substream(stream, &substream)) if (!pb_make_string_substream(stream, &substream))
return false; return false;
do while (substream.bytes_left)
{ {
if (!pCallback->funcs.decode(&substream, iter->pos, arg)) if (!pCallback->funcs.decode(&substream, iter->pos, arg))
PB_RETURN_ERROR(stream, "callback failed"); PB_RETURN_ERROR(stream, "callback failed");
} while (substream.bytes_left); }
pb_close_string_substream(stream, &substream); pb_close_string_substream(stream, &substream);
return true; return true;
@@ -476,136 +459,105 @@ static bool checkreturn decode_field(pb_istream_t *stream, pb_wire_type_t wire_t
} }
} }
/* Default handler for extension fields. Expects a pb_field_t structure /* Set field count to zero (or clear has_ field). */
* in extension->type->arg. */ static void pb_clear_field_count(const pb_field_iterator_t *iter)
static bool checkreturn default_extension_handler(pb_istream_t *stream,
pb_extension_t *extension, uint32_t tag, pb_wire_type_t wire_type)
{ {
const pb_field_t *field = (const pb_field_t*)extension->type->arg; pb_type_t type;
pb_field_iterator_t iter; type = iter->pos->type;
bool dummy;
if (field->tag != tag) if (iter->pos->tag == 0)
return true; return; /* Empty message type */
iter.start = field; if (PB_ATYPE(type) == PB_ATYPE_STATIC)
iter.pos = field;
iter.field_index = 0;
iter.required_field_index = 0;
iter.dest_struct = extension->dest;
iter.pData = extension->dest;
iter.pSize = &dummy;
return decode_field(stream, wire_type, &iter);
}
/* Try to decode an unknown field as an extension field. Tries each extension
* decoder in turn, until one of them handles the field or loop ends. */
static bool checkreturn decode_extension(pb_istream_t *stream,
uint32_t tag, pb_wire_type_t wire_type, pb_field_iterator_t *iter)
{
pb_extension_t *extension = *(pb_extension_t* const *)iter->pData;
size_t pos = stream->bytes_left;
while (extension && pos == stream->bytes_left)
{ {
bool status; if (PB_HTYPE(type) == PB_HTYPE_OPTIONAL)
if (extension->type->decode) {
status = extension->type->decode(stream, extension, tag, wire_type); *(bool*)iter->pSize = false;
else }
status = default_extension_handler(stream, extension, tag, wire_type); else if (PB_HTYPE(type) == PB_HTYPE_REPEATED)
{
if (!status) *(size_t*)iter->pSize = 0;
return false; }
extension = extension->next;
} }
return true;
} }
/* Step through the iterator until an extension field is found or until all /* Initialize message field to default value. Recurses on submessages. */
* entries have been checked. There can be only one extension field per static void pb_set_field_to_default(const pb_field_iterator_t *iter)
* message. Returns false if no extension field is found. */
static bool checkreturn find_extension_field(pb_field_iterator_t *iter)
{ {
unsigned start = iter->field_index; pb_type_t type;
type = iter->pos->type;
do { if (iter->pos->tag == 0)
if (PB_LTYPE(iter->pos->type) == PB_LTYPE_EXTENSION) return; /* Empty message type */
return true;
pb_field_next(iter);
} while (iter->field_index != start);
return false; /* We only need to initialize static fields.
} * Furthermore, arrays do not need to be initialized as their length
* will be zero by default.
/* Initialize message fields to default values, recursively */ */
static void pb_message_set_to_defaults(const pb_field_t fields[], void *dest_struct) if (PB_ATYPE(type) == PB_ATYPE_STATIC &&
{ PB_HTYPE(type) != PB_HTYPE_REPEATED)
pb_field_iterator_t iter;
pb_field_init(&iter, fields, dest_struct);
/* Initialize size/has fields and apply default values */
do
{ {
pb_type_t type; if (PB_LTYPE(iter->pos->type) == PB_LTYPE_SUBMESSAGE)
type = iter.pos->type;
if (iter.pos->tag == 0)
continue;
if (PB_ATYPE(type) == PB_ATYPE_STATIC)
{ {
/* Initialize the size field for optional/repeated fields to 0. */ /* Submessage: initialize the fields recursively */
if (PB_HTYPE(type) == PB_HTYPE_OPTIONAL) pb_field_iterator_t subiter;
{ pb_field_init(&subiter, (const pb_field_t *)iter->pos->ptr, iter->pData);
*(bool*)iter.pSize = false; do {
} pb_clear_field_count(&subiter);
else if (PB_HTYPE(type) == PB_HTYPE_REPEATED) pb_set_field_to_default(&subiter);
{ } while (pb_field_next(&subiter));
*(size_t*)iter.pSize = 0;
continue; /* Array is empty, no need to initialize contents */
}
/* Initialize field contents to default value */
if (PB_LTYPE(iter.pos->type) == PB_LTYPE_SUBMESSAGE)
{
pb_message_set_to_defaults((const pb_field_t *) iter.pos->ptr, iter.pData);
}
else if (iter.pos->ptr != NULL)
{
memcpy(iter.pData, iter.pos->ptr, iter.pos->data_size);
}
else
{
memset(iter.pData, 0, iter.pos->data_size);
}
} }
else if (PB_ATYPE(type) == PB_ATYPE_CALLBACK) else if (iter->pos->ptr != NULL)
{ {
continue; /* Don't overwrite callback */ /* Normal field: copy the default value */
memcpy(iter->pData, iter->pos->ptr, iter->pos->data_size);
} }
} while (pb_field_next(&iter)); else
{
/* Normal field without default value: initialize to zero */
memset(iter->pData, 0, iter->pos->data_size);
}
}
} }
/********************* /*********************
* Decode all fields * * Decode all fields *
*********************/ *********************/
bool checkreturn pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct) /* Helper function to initialize fields while advancing iterator */
static void advance_iterator(pb_field_iterator_t *iter, bool *initialize, bool *current_seen)
{
/* Initialize the fields we didn't decode. */
if (*initialize && !*current_seen)
pb_set_field_to_default(iter);
/* Stop initializing after the first pass through the array */
if (!pb_field_next(iter))
*initialize = false;
/* Clear the field count before decoding */
if (*initialize)
pb_clear_field_count(iter);
/* Reset the flag to indicate that the new field has not been written to yet. */
*current_seen = false;
}
static bool checkreturn pb_decode_inner(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct, bool initialize)
{ {
uint8_t fields_seen[(PB_MAX_REQUIRED_FIELDS + 7) / 8] = {0}; /* Used to check for required fields */ uint8_t fields_seen[(PB_MAX_REQUIRED_FIELDS + 7) / 8] = {0}; /* Used to check for required fields */
uint32_t extension_range_start = 0; bool current_seen = false;
pb_field_iterator_t iter; pb_field_iterator_t iter;
pb_field_init(&iter, fields, dest_struct); pb_field_init(&iter, fields, dest_struct);
pb_clear_field_count(&iter);
while (stream->bytes_left) while (stream->bytes_left)
{ {
uint32_t tag; uint32_t tag;
pb_wire_type_t wire_type; pb_wire_type_t wire_type;
bool eof; bool eof;
unsigned start;
bool skip = false;
if (!pb_decode_tag(stream, &wire_type, &tag, &eof)) if (!pb_decode_tag(stream, &wire_type, &tag, &eof))
{ {
@@ -615,43 +567,45 @@ bool checkreturn pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[
return false; return false;
} }
if (!pb_field_find(&iter, tag)) /* Go through the fields until we either find a match or
* wrap around to start. On the first pass, also initialize
* any missing fields.
*
* The logic here is to avoid unnecessary initialization
* in the common case, where all fields occur in the proper
* order.
*/
start = iter.field_index;
while (iter.pos->tag != tag)
{ {
/* No match found, check if it matches an extension. */ advance_iterator(&iter, &initialize, &current_seen);
if (tag >= extension_range_start)
if (iter.field_index == start)
{ {
if (!find_extension_field(&iter)) skip = true;
extension_range_start = (uint32_t)-1; break;
else
extension_range_start = iter.pos->tag;
if (tag >= extension_range_start)
{
size_t pos = stream->bytes_left;
if (!decode_extension(stream, tag, wire_type, &iter))
return false;
if (pos != stream->bytes_left)
{
/* The field was handled */
continue;
}
}
} }
}
/* No match found, skip data */ /* Skip data if field was not found */
if (skip)
{
if (!pb_skip_field(stream, wire_type)) if (!pb_skip_field(stream, wire_type))
return false; return false;
continue; continue;
} }
current_seen = true;
/* Keep track if all required fields are present */
if (PB_HTYPE(iter.pos->type) == PB_HTYPE_REQUIRED if (PB_HTYPE(iter.pos->type) == PB_HTYPE_REQUIRED
&& iter.required_field_index < PB_MAX_REQUIRED_FIELDS) && iter.required_field_index < PB_MAX_REQUIRED_FIELDS)
{ {
fields_seen[iter.required_field_index >> 3] |= (uint8_t)(1 << (iter.required_field_index & 7)); fields_seen[iter.required_field_index >> 3] |= (uint8_t)(1 << (iter.required_field_index & 7));
} }
/* Finally, decode the field data */
if (!decode_field(stream, wire_type, &iter)) if (!decode_field(stream, wire_type, &iter))
return false; return false;
} }
@@ -661,6 +615,9 @@ bool checkreturn pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[
/* First figure out the number of required fields by /* First figure out the number of required fields by
* seeking to the end of the field array. Usually we * seeking to the end of the field array. Usually we
* are already close to end after decoding. * are already close to end after decoding.
*
* Note: this simultaneously initializes any fields
* that haven't been already initialized.
*/ */
unsigned req_field_count; unsigned req_field_count;
pb_type_t last_type; pb_type_t last_type;
@@ -668,7 +625,8 @@ bool checkreturn pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[
do { do {
req_field_count = iter.required_field_index; req_field_count = iter.required_field_index;
last_type = iter.pos->type; last_type = iter.pos->type;
} while (pb_field_next(&iter)); advance_iterator(&iter, &initialize, &current_seen);
} while (iter.field_index != 0);
/* Fixup if last field was also required. */ /* Fixup if last field was also required. */
if (PB_HTYPE(last_type) == PB_HTYPE_REQUIRED && iter.pos->tag) if (PB_HTYPE(last_type) == PB_HTYPE_REQUIRED && iter.pos->tag)
@@ -689,23 +647,14 @@ bool checkreturn pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[
return true; return true;
} }
bool checkreturn pb_decode(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct) bool checkreturn pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct)
{ {
pb_message_set_to_defaults(fields, dest_struct); return pb_decode_inner(stream, fields, dest_struct, false);
return pb_decode_noinit(stream, fields, dest_struct);
} }
bool pb_decode_delimited(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct) bool checkreturn pb_decode(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct)
{ {
pb_istream_t substream; return pb_decode_inner(stream, fields, dest_struct, true);
bool status;
if (!pb_make_string_substream(stream, &substream))
return false;
status = pb_decode(&substream, fields, dest_struct);
pb_close_string_substream(stream, &substream);
return status;
} }
/* Field decoders */ /* Field decoders */
@@ -769,8 +718,7 @@ bool pb_decode_fixed64(pb_istream_t *stream, void *dest)
bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_t *field, void *dest) bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_t *field, void *dest)
{ {
uint64_t value; uint64_t value;
if (!pb_decode_varint(stream, &value)) bool status = pb_decode_varint(stream, &value);
return false;
switch (field->data_size) switch (field->data_size)
{ {
@@ -781,14 +729,13 @@ bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_t *field, vo
default: PB_RETURN_ERROR(stream, "invalid data_size"); default: PB_RETURN_ERROR(stream, "invalid data_size");
} }
return true; return status;
} }
bool checkreturn pb_dec_svarint(pb_istream_t *stream, const pb_field_t *field, void *dest) bool checkreturn pb_dec_svarint(pb_istream_t *stream, const pb_field_t *field, void *dest)
{ {
int64_t value; int64_t value;
if (!pb_decode_svarint(stream, &value)) bool status = pb_decode_svarint(stream, &value);
return false;
switch (field->data_size) switch (field->data_size)
{ {
@@ -797,7 +744,7 @@ bool checkreturn pb_dec_svarint(pb_istream_t *stream, const pb_field_t *field, v
default: PB_RETURN_ERROR(stream, "invalid data_size"); default: PB_RETURN_ERROR(stream, "invalid data_size");
} }
return true; return status;
} }
bool checkreturn pb_dec_fixed32(pb_istream_t *stream, const pb_field_t *field, void *dest) bool checkreturn pb_dec_fixed32(pb_istream_t *stream, const pb_field_t *field, void *dest)
@@ -856,12 +803,7 @@ bool checkreturn pb_dec_submessage(pb_istream_t *stream, const pb_field_t *field
if (field->ptr == NULL) if (field->ptr == NULL)
PB_RETURN_ERROR(stream, "invalid field descriptor"); PB_RETURN_ERROR(stream, "invalid field descriptor");
/* New array entries need to be initialized, while required and optional status = pb_decode(&substream, submsg_fields, dest);
* submessages have already been initialized in the top-level pb_decode. */
if (PB_HTYPE(field->type) == PB_HTYPE_REPEATED)
status = pb_decode(&substream, submsg_fields, dest);
else
status = pb_decode_noinit(&substream, submsg_fields, dest);
pb_close_string_substream(stream, &substream); pb_close_string_substream(stream, &substream);
return status; return status;

View File

@@ -1,29 +1,34 @@
/* pb_decode.h: Functions to decode protocol buffers. Depends on pb_decode.c.
* The main function is pb_decode. You also need an input stream, and the
* field descriptions created by nanopb_generator.py.
*/
#ifndef _PB_DECODE_H_ #ifndef _PB_DECODE_H_
#define _PB_DECODE_H_ #define _PB_DECODE_H_
/* pb_decode.h: Functions to decode protocol buffers. Depends on pb_decode.c.
* The main function is pb_decode. You will also need to create an input
* stream, which is easiest to do with pb_istream_from_buffer().
*
* You also need structures and their corresponding pb_field_t descriptions.
* These are usually generated from .proto-files with a script.
*/
#include <stdbool.h>
#include "pb.h" #include "pb.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
/* Structure for defining custom input streams. You will need to provide /* Lightweight input stream.
* a callback function to read the bytes from your storage, which can be * You can provide a callback function for reading or use
* for example a file or a network socket. * pb_istream_from_buffer.
*
* The callback must conform to these rules:
* *
* Rules for callback:
* 1) Return false on IO errors. This will cause decoding to abort. * 1) Return false on IO errors. This will cause decoding to abort.
*
* 2) You can use state to store your own data (e.g. buffer pointer), * 2) You can use state to store your own data (e.g. buffer pointer),
 * and rely on pb_read to verify that nobody reads past bytes_left. * and rely on pb_read to verify that nobody reads past bytes_left.
*
* 3) Your callback may be used with substreams, in which case bytes_left * 3) Your callback may be used with substreams, in which case bytes_left
* is different than from the main stream. Don't use bytes_left to compute * is different than from the main stream. Don't use bytes_left to compute
* any pointers. * any pointers.
*/ */
struct _pb_istream_t struct _pb_istream_t
{ {
@@ -45,25 +50,12 @@ struct _pb_istream_t
#endif #endif
}; };
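Following the callback rules above, a custom input stream needs little more than a read callback and an initialized bytes_left. A sketch that wraps a standard FILE*; the helper name is invented, and the member names mirror the output-stream struct shown later in this diff:

    #include <stdio.h>
    #include "pb_decode.h"

    /* Sketch: return false on I/O error, keep the FILE* in 'state',
     * and never derive pointers from bytes_left. */
    static bool file_read_cb(pb_istream_t *stream, uint8_t *buf, size_t count)
    {
        FILE *fp = (FILE*)stream->state;
        return fread(buf, 1, count, fp) == count;
    }

    static pb_istream_t pb_istream_from_file(FILE *fp, size_t msglen)
    {
        pb_istream_t stream;
        stream.callback = &file_read_cb;
        stream.state = fp;
        stream.bytes_left = msglen;   /* decoding stops after this many bytes */
    #ifndef PB_NO_ERRMSG
        stream.errmsg = NULL;
    #endif
        return stream;
    }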
/*************************** pb_istream_t pb_istream_from_buffer(uint8_t *buf, size_t bufsize);
* Main decoding functions * bool pb_read(pb_istream_t *stream, uint8_t *buf, size_t count);
***************************/
/* Decode a single protocol buffers message from input stream into a C structure. /* Decode from stream to destination struct.
* Returns true on success, false on any failure. * Returns true on success, false on any failure.
* The actual struct pointed to by dest must match the description in fields. * The actual struct pointed to by dest must match the description in fields.
* Callback fields of the destination structure must be initialized by caller.
* All other fields will be initialized by this function.
*
* Example usage:
* MyMessage msg = {};
* uint8_t buffer[64];
* pb_istream_t stream;
*
* // ... read some data into buffer ...
*
* stream = pb_istream_from_buffer(buffer, count);
* pb_decode(&stream, MyMessage_fields, &msg);
*/ */
bool pb_decode(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct); bool pb_decode(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct);
@@ -71,38 +63,14 @@ bool pb_decode(pb_istream_t *stream, const pb_field_t fields[], void *dest_struc
* to default values. This is slightly faster if you need no default values * to default values. This is slightly faster if you need no default values
* and just do memset(struct, 0, sizeof(struct)) yourself. * and just do memset(struct, 0, sizeof(struct)) yourself.
* *
* This can also be used for 'merging' two messages, i.e. update only the * It can also be used to merge fields from a new message into a previously
* fields that exist in the new message. * initialized structure.
*/ */
bool pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct); bool pb_decode_noinit(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct);
/* Same as pb_decode, except expects the stream to start with the message size /* --- Helper functions ---
* encoded as varint. Corresponds to parseDelimitedFrom() in Google's * You may want to use these from your caller or callbacks.
* protobuf API.
*/ */
bool pb_decode_delimited(pb_istream_t *stream, const pb_field_t fields[], void *dest_struct);
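For reference, the delimited variant removed in the right-hand column pairs with pb_encode_delimited further down in this diff: the stream is expected to begin with the message length encoded as a varint. A sketch with an invented message type:

    /* Sketch: read one length-prefixed MyMessage from a buffer.
     * MyMessage and MyMessage_fields are stand-ins for generator output. */
    static bool read_one(const uint8_t *buf, size_t len, MyMessage *msg)
    {
        pb_istream_t stream = pb_istream_from_buffer((uint8_t*)buf, len);
        return pb_decode_delimited(&stream, MyMessage_fields, msg);
    }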
/**************************************
* Functions for manipulating streams *
**************************************/
/* Create an input stream for reading from a memory buffer.
*
* Alternatively, you can use a custom stream that reads directly from e.g.
* a file or a network socket.
*/
pb_istream_t pb_istream_from_buffer(uint8_t *buf, size_t bufsize);
/* Function to read from a pb_istream_t. You can use this if you need to
* read some custom header data, or to read data in field callbacks.
*/
bool pb_read(pb_istream_t *stream, uint8_t *buf, size_t count);
/************************************************
* Helper functions for writing field callbacks *
************************************************/
/* Decode the tag for the next field in the stream. Gives the wire type and /* Decode the tag for the next field in the stream. Gives the wire type and
* field tag. At end of the message, returns false and sets eof to true. */ * field tag. At end of the message, returns false and sets eof to true. */
@@ -131,10 +99,10 @@ bool pb_decode_fixed64(pb_istream_t *stream, void *dest);
bool pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream); bool pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream);
void pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream); void pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream);
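These helpers are the usual building blocks of field callbacks. For string and bytes fields the library already wraps the contents in a substream before invoking the callback, so reading until bytes_left reaches zero is safe. A sketch using the post-0.2.1 callback signature; the field name in the usage comment is assumed:

    #include <stdio.h>
    #include "pb_decode.h"

    /* Sketch: print a callback-typed string field byte by byte. */
    static bool print_string_cb(pb_istream_t *stream, const pb_field_t *field, void **arg)
    {
        uint8_t byte;
        (void)field; (void)arg;
        while (stream->bytes_left)
        {
            if (!pb_read(stream, &byte, 1))
                return false;
            putchar(byte);
        }
        return true;
    }

    /* Wire it up before decoding:
     *   msg.name.funcs.decode = &print_string_cb;
     */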
/* --- Internal functions ---
/******************************* * These functions are not terribly useful for the average library user, but
* Internal / legacy functions * * are exported to make the unit testing and extending nanopb easier.
*******************************/ */
#ifdef NANOPB_INTERNALS #ifdef NANOPB_INTERNALS
bool pb_dec_varint(pb_istream_t *stream, const pb_field_t *field, void *dest); bool pb_dec_varint(pb_istream_t *stream, const pb_field_t *field, void *dest);

View File

@@ -28,8 +28,7 @@ static const pb_encoder_t PB_ENCODERS[PB_LTYPES_COUNT] = {
&pb_enc_bytes, &pb_enc_bytes,
&pb_enc_string, &pb_enc_string,
&pb_enc_submessage, &pb_enc_submessage
NULL /* extensions */
}; };
/* pb_ostream_t implementation */ /* pb_ostream_t implementation */
@@ -84,7 +83,10 @@ bool checkreturn pb_write(pb_ostream_t *stream, const uint8_t *buf, size_t count
/* Main encoding stuff */ /* Main encoding stuff */
/* Encode a static array. Handles the size calculations and possible packing. */ /* Callbacks don't need this function because they usually know the data type
* without examining the field structure.
* Therefore it is static for now.
*/
static bool checkreturn encode_array(pb_ostream_t *stream, const pb_field_t *field, static bool checkreturn encode_array(pb_ostream_t *stream, const pb_field_t *field,
const void *pData, size_t count, pb_encoder_t func) const void *pData, size_t count, pb_encoder_t func)
{ {
@@ -95,7 +97,6 @@ static bool checkreturn encode_array(pb_ostream_t *stream, const pb_field_t *fie
if (count == 0) if (count == 0)
return true; return true;
/* We always pack arrays if the datatype allows it. */
if (PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE) if (PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE)
{ {
if (!pb_encode_tag(stream, PB_WT_STRING, field->tag)) if (!pb_encode_tag(stream, PB_WT_STRING, field->tag))
@@ -154,21 +155,13 @@ static bool checkreturn encode_array(pb_ostream_t *stream, const pb_field_t *fie
return true; return true;
} }
/* Encode a field with static allocation, i.e. one whose data is stored bool checkreturn encode_static_field(pb_ostream_t *stream, const pb_field_t *field, const void *pData)
* in the structure itself. */
static bool checkreturn encode_static_field(pb_ostream_t *stream,
const pb_field_t *field, const void *pData)
{ {
pb_encoder_t func; pb_encoder_t func;
const void *pSize; const void *pSize;
bool dummy = true;
func = PB_ENCODERS[PB_LTYPE(field->type)]; func = PB_ENCODERS[PB_LTYPE(field->type)];
pSize = (const char*)pData + field->size_offset;
if (field->size_offset)
pSize = (const char*)pData + field->size_offset;
else
pSize = &dummy;
switch (PB_HTYPE(field->type)) switch (PB_HTYPE(field->type))
{ {
@@ -202,10 +195,7 @@ static bool checkreturn encode_static_field(pb_ostream_t *stream,
return true; return true;
} }
/* Encode a field with callback semantics. This means that a user function is bool checkreturn encode_callback_field(pb_ostream_t *stream, const pb_field_t *field, const void *pData)
* called to provide and encode the actual data. */
static bool checkreturn encode_callback_field(pb_ostream_t *stream,
const pb_field_t *field, const void *pData)
{ {
const pb_callback_t *callback = (const pb_callback_t*)pData; const pb_callback_t *callback = (const pb_callback_t*)pData;
@@ -223,57 +213,6 @@ static bool checkreturn encode_callback_field(pb_ostream_t *stream,
return true; return true;
} }
/* Encode a single field of any callback or static type. */
static bool checkreturn encode_field(pb_ostream_t *stream,
const pb_field_t *field, const void *pData)
{
switch (PB_ATYPE(field->type))
{
case PB_ATYPE_STATIC:
return encode_static_field(stream, field, pData);
case PB_ATYPE_CALLBACK:
return encode_callback_field(stream, field, pData);
default:
PB_RETURN_ERROR(stream, "invalid field type");
}
}
/* Default handler for extension fields. Expects to have a pb_field_t
* pointer in the extension->type->arg field. */
static bool checkreturn default_extension_handler(pb_ostream_t *stream,
const pb_extension_t *extension)
{
const pb_field_t *field = (const pb_field_t*)extension->type->arg;
return encode_field(stream, field, extension->dest);
}
/* Walk through all the registered extensions and give them a chance
* to encode themselves. */
static bool checkreturn encode_extension_field(pb_ostream_t *stream,
const pb_field_t *field, const void *pData)
{
const pb_extension_t *extension = *(const pb_extension_t* const *)pData;
UNUSED(field);
while (extension)
{
bool status;
if (extension->type->encode)
status = extension->type->encode(stream, extension);
else
status = default_extension_handler(stream, extension);
if (!status)
return false;
extension = extension->next;
}
return true;
}
bool checkreturn pb_encode(pb_ostream_t *stream, const pb_field_t fields[], const void *src_struct) bool checkreturn pb_encode(pb_ostream_t *stream, const pb_field_t fields[], const void *src_struct)
{ {
const pb_field_t *field = fields; const pb_field_t *field = fields;
@@ -292,17 +231,20 @@ bool checkreturn pb_encode(pb_ostream_t *stream, const pb_field_t fields[], cons
prev_size *= field->array_size; prev_size *= field->array_size;
} }
if (PB_LTYPE(field->type) == PB_LTYPE_EXTENSION) switch (PB_ATYPE(field->type))
{ {
/* Special case for the extension field placeholder */ case PB_ATYPE_STATIC:
if (!encode_extension_field(stream, field, pData)) if (!encode_static_field(stream, field, pData))
return false; return false;
} break;
else
{ case PB_ATYPE_CALLBACK:
/* Regular field */ if (!encode_callback_field(stream, field, pData))
if (!encode_field(stream, field, pData)) return false;
return false; break;
default:
PB_RETURN_ERROR(stream, "invalid field type");
} }
field++; field++;
@@ -311,11 +253,6 @@ bool checkreturn pb_encode(pb_ostream_t *stream, const pb_field_t fields[], cons
return true; return true;
} }
bool pb_encode_delimited(pb_ostream_t *stream, const pb_field_t fields[], const void *src_struct)
{
return pb_encode_submessage(stream, fields, src_struct);
}
/* Helper functions */ /* Helper functions */
bool checkreturn pb_encode_varint(pb_ostream_t *stream, uint64_t value) bool checkreturn pb_encode_varint(pb_ostream_t *stream, uint64_t value)
{ {
@@ -524,16 +461,8 @@ bool checkreturn pb_enc_bytes(pb_ostream_t *stream, const pb_field_t *field, con
bool checkreturn pb_enc_string(pb_ostream_t *stream, const pb_field_t *field, const void *src) bool checkreturn pb_enc_string(pb_ostream_t *stream, const pb_field_t *field, const void *src)
{ {
/* strnlen() is not always available, so just use a for-loop */ UNUSED(field);
size_t size = 0; return pb_encode_string(stream, (const uint8_t*)src, strlen((const char*)src));
const char *p = (const char*)src;
while (size < field->data_size && *p != '\0')
{
size++;
p++;
}
return pb_encode_string(stream, (const uint8_t*)src, size);
} }
bool checkreturn pb_enc_submessage(pb_ostream_t *stream, const pb_field_t *field, const void *src) bool checkreturn pb_enc_submessage(pb_ostream_t *stream, const pb_field_t *field, const void *src)

View File

@@ -1,28 +1,34 @@
-/* pb_encode.h: Functions to encode protocol buffers. Depends on pb_encode.c.
- * The main function is pb_encode. You also need an output stream, and the
- * field descriptions created by nanopb_generator.py.
- */
 #ifndef _PB_ENCODE_H_
 #define _PB_ENCODE_H_
 
+/* pb_encode.h: Functions to encode protocol buffers. Depends on pb_encode.c.
+ * The main function is pb_encode. You also need an output stream, structures
+ * and their field descriptions (just like with pb_decode).
+ */
+
+#include <stdbool.h>
 #include "pb.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-/* Structure for defining custom output streams. You will need to provide
- * a callback function to write the bytes to your storage, which can be
- * for example a file or a network socket.
+/* Lightweight output stream.
+ * You can provide callback for writing or use pb_ostream_from_buffer.
  *
- * The callback must conform to these rules:
+ * Alternatively, callback can be NULL in which case the stream will just
+ * count the number of bytes that would have been written. In this case
+ * max_size is not checked.
  *
+ * Rules for callback:
  * 1) Return false on IO errors. This will cause encoding to abort.
+ *
  * 2) You can use state to store your own data (e.g. buffer pointer).
+ *
  * 3) pb_write will update bytes_written after your callback runs.
- * 4) Substreams will modify max_size and bytes_written. Don't use them
- *    to calculate any pointers.
+ *
+ * 4) Substreams will modify max_size and bytes_written. Don't use them to
+ * calculate any pointers.
  */
 struct _pb_ostream_t
 {
@@ -37,84 +43,42 @@ struct _pb_ostream_t
 #else
     bool (*callback)(pb_ostream_t *stream, const uint8_t *buf, size_t count);
 #endif
-    void *state; /* Free field for use by callback implementation. */
+    void *state; /* Free field for use by callback implementation */
     size_t max_size; /* Limit number of output bytes written (or use SIZE_MAX). */
-    size_t bytes_written; /* Number of bytes written so far. */
+    size_t bytes_written;
 
 #ifndef PB_NO_ERRMSG
     const char *errmsg;
 #endif
 };
 
-/***************************
- * Main encoding functions *
- ***************************/
-
-/* Encode a single protocol buffers message from C structure into a stream.
- * Returns true on success, false on any failure.
- * The actual struct pointed to by src_struct must match the description in fields.
- * All required fields in the struct are assumed to have been filled in.
- *
- * Example usage:
- *    MyMessage msg = {};
- *    uint8_t buffer[64];
- *    pb_ostream_t stream;
- *
- *    msg.field1 = 42;
- *    stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
- *    pb_encode(&stream, MyMessage_fields, &msg);
- */
-bool pb_encode(pb_ostream_t *stream, const pb_field_t fields[], const void *src_struct);
-
-/* Same as pb_encode, but prepends the length of the message as a varint.
- * Corresponds to writeDelimitedTo() in Google's protobuf API.
- */
-bool pb_encode_delimited(pb_ostream_t *stream, const pb_field_t fields[], const void *src_struct);
-
-/**************************************
- * Functions for manipulating streams *
- **************************************/
-
-/* Create an output stream for writing into a memory buffer.
- * The number of bytes written can be found in stream.bytes_written after
- * encoding the message.
- *
- * Alternatively, you can use a custom stream that writes directly to e.g.
- * a file or a network socket.
- */
 pb_ostream_t pb_ostream_from_buffer(uint8_t *buf, size_t bufsize);
+bool pb_write(pb_ostream_t *stream, const uint8_t *buf, size_t count);
 
-/* Pseudo-stream for measuring the size of a message without actually storing
- * the encoded data.
- *
- * Example usage:
- *    MyMessage msg = {};
- *    pb_ostream_t stream = PB_OSTREAM_SIZING;
- *    pb_encode(&stream, MyMessage_fields, &msg);
- *    printf("Message size is %d\n", stream.bytes_written);
- */
+/* Stream type for use in computing message sizes */
 #ifndef PB_NO_ERRMSG
 #define PB_OSTREAM_SIZING {0,0,0,0,0}
 #else
 #define PB_OSTREAM_SIZING {0,0,0,0}
 #endif
 
-/* Function to write into a pb_ostream_t stream. You can use this if you need
- * to append or prepend some custom headers to the message.
+/* Encode struct to given output stream.
+ * Returns true on success, false on any failure.
+ * The actual struct pointed to by src_struct must match the description in fields.
+ * All required fields in the struct are assumed to have been filled in.
  */
-bool pb_write(pb_ostream_t *stream, const uint8_t *buf, size_t count);
+bool pb_encode(pb_ostream_t *stream, const pb_field_t fields[], const void *src_struct);
 
+/* --- Helper functions ---
+ * You may want to use these from your caller or callbacks.
+ */
 
-/************************************************
- * Helper functions for writing field callbacks *
- ************************************************/
-
-/* Encode field header based on type and field number defined in the field
- * structure. Call this from the callback before writing out field contents. */
+/* Encode field header based on LTYPE and field number defined in the field structure.
+ * Call this from the callback before writing out field contents. */
 bool pb_encode_tag_for_field(pb_ostream_t *stream, const pb_field_t *field);
 
-/* Encode field header by manually specifing wire type. You need to use this
- * if you want to write out packed arrays from a callback field. */
+/* Encode field header by manually specifing wire type. You need to use this if
+ * you want to write out packed arrays from a callback field. */
 bool pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number);
 
 /* Encode an integer in the varint format.
@@ -137,16 +101,15 @@ bool pb_encode_fixed32(pb_ostream_t *stream, const void *value);
 bool pb_encode_fixed64(pb_ostream_t *stream, const void *value);
 
 /* Encode a submessage field.
- * You need to pass the pb_field_t array and pointer to struct, just like
- * with pb_encode(). This internally encodes the submessage twice, first to
- * calculate message size and then to actually write it out.
+ * You need to pass the pb_field_t array and pointer to struct, just like with pb_encode().
+ * This internally encodes the submessage twice, first to calculate message size and then to actually write it out.
  */
 bool pb_encode_submessage(pb_ostream_t *stream, const pb_field_t fields[], const void *src_struct);
 
-/*******************************
- * Internal / legacy functions *
- *******************************/
+/* --- Internal functions ---
+ * These functions are not terribly useful for the average library user, but
+ * are exported to make the unit testing and extending nanopb easier.
+ */
 #ifdef NANOPB_INTERNALS
 bool pb_enc_varint(pb_ostream_t *stream, const pb_field_t *field, const void *src);
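To make the callback rules above concrete, here is a rough sketch (not part of the diff) of an output stream that writes straight to a FILE*. The names write_to_file and file_ostream are invented for this illustration, and the PB_BUFFER_ONLY variant of pb_ostream_t is ignored.

    #include <stdio.h>
    #include <stdint.h>   /* SIZE_MAX */
    #include <pb_encode.h>

    /* Hypothetical callback: writes the bytes handed over by pb_write()
     * to the FILE* kept in stream->state. Returning false on an IO error
     * makes the whole pb_encode() call abort (rule 1 above). */
    static bool write_to_file(pb_ostream_t *stream, const uint8_t *buf, size_t count)
    {
        FILE *fp = (FILE*)stream->state;
        return fwrite(buf, 1, count, fp) == count;
    }

    static pb_ostream_t file_ostream(FILE *fp)
    {
        pb_ostream_t stream;
        stream.callback = &write_to_file;
        stream.state = fp;            /* rule 2: state is free for the callback's use */
        stream.max_size = SIZE_MAX;   /* no artificial output limit */
        stream.bytes_written = 0;     /* rule 3: updated by pb_write() after each call */
    #ifndef PB_NO_ERRMSG
        stream.errmsg = NULL;
    #endif
        return stream;
    }

A stream built this way is used exactly like one from pb_ostream_from_buffer(): pass it to pb_encode() and read bytes_written afterwards.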

@@ -1,6 +1,139 @@
-all:
-	scons
+CFLAGS=-ansi -Wall -Werror -I .. -g -O0
+DEPS=../pb_decode.h ../pb_encode.h ../pb.h person.pb.h \
callbacks2.pb.h callbacks.pb.h unittests.h unittestproto.pb.h \
alltypes.pb.h missing_fields.pb.h
TESTS= decode_unittests encode_unittests \
test_decode1 test_decode2 test_decode3 test_decode3_buf \
test_encode1 test_encode2 test_encode3 test_encode3_buf \
test_decode_callbacks test_encode_callbacks \
test_missing_fields test_no_messages test_funny_name \
test_multiple_files test_cxxcompile test_options \
bc_encode bc_decode
# More strict checks for the core part of nanopb
CC_VERSION=$(shell $(CC) -v 2>&1)
CFLAGS_CORE=
ifneq (,$(findstring gcc,$(CC_VERSION)))
CFLAGS_CORE=-pedantic -Wextra -Wcast-qual -Wlogical-op -Wconversion
CFLAGS+=--coverage -fstack-protector-all
LDFLAGS+=--coverage
endif
ifneq (,$(findstring clang,$(CC_VERSION)))
CFLAGS_CORE=-pedantic -Wextra -Wcast-qual -Wconversion
endif
# Also use mudflap if it is available
# To enable, run with make -B USE_MUDFLAP=y
USE_MUDFLAP ?= n
ifeq ($(USE_MUDFLAP),y)
CFLAGS += -fmudflap
LDFLAGS += -lmudflap -fmudflap
endif
all: breakpoints $(TESTS) run_unittests
 clean:
-	scons -c
+	rm -f $(TESTS) person.pb* alltypes.pb* *.o *.gcda *.gcno *.pb.h *.pb.c
%.pb.o: %.pb.c %.pb.h
$(CC) $(CFLAGS) $(CFLAGS_CORE) -c -o $@ $<
%.o: %.c
%.o: %.c $(DEPS)
$(CC) $(CFLAGS) -c -o $@ $<
pb_encode.o: ../pb_encode.c $(DEPS)
$(CC) $(CFLAGS) $(CFLAGS_CORE) -c -o $@ $<
pb_decode.o: ../pb_decode.c $(DEPS)
$(CC) $(CFLAGS) $(CFLAGS_CORE) -c -o $@ $<
# Test for compilability with c++ compiler
pb_encode.cxx.o: ../pb_encode.c $(DEPS)
$(CXX) $(CFLAGS) $(CFLAGS_CORE) -c -o $@ $<
pb_decode.cxx.o: ../pb_decode.c $(DEPS)
$(CXX) $(CFLAGS) $(CFLAGS_CORE) -c -o $@ $<
# Test for PB_BUF_ONLY compilation option
pb_encode.buf.o: ../pb_encode.c $(DEPS)
$(CC) -DPB_BUFFER_ONLY $(CFLAGS) $(CFLAGS_CORE) -c -o $@ $<
pb_decode.buf.o: ../pb_decode.c $(DEPS)
$(CC) -DPB_BUFFER_ONLY $(CFLAGS) $(CFLAGS_CORE) -c -o $@ $<
%.buf.o: %.c $(DEPS)
$(CC) -DPB_BUFFER_ONLY $(CFLAGS) -c -o $@ $<
test_encode3_buf: test_encode3.buf.o pb_encode.buf.o alltypes.pb.o
$(CC) $(LDFLAGS) $^ -o $@
test_decode3_buf: test_decode3.buf.o pb_decode.buf.o alltypes.pb.o
$(CC) $(LDFLAGS) $^ -o $@
test_cxxcompile: pb_encode.cxx.o pb_decode.cxx.o
test_decode1: test_decode1.o pb_decode.o person.pb.o
test_decode2: test_decode2.o pb_decode.o person.pb.o
test_decode3: test_decode3.o pb_decode.o alltypes.pb.o
test_encode1: test_encode1.o pb_encode.o person.pb.o
test_encode2: test_encode2.o pb_encode.o person.pb.o
test_encode3: test_encode3.o pb_encode.o alltypes.pb.o
test_multiple_files: test_multiple_files.o pb_encode.o callbacks2.pb.o callbacks.pb.o
test_decode_callbacks: test_decode_callbacks.o pb_decode.o callbacks.pb.o
test_encode_callbacks: test_encode_callbacks.o pb_encode.o callbacks.pb.o
test_missing_fields: test_missing_fields.o pb_encode.o pb_decode.o missing_fields.pb.o
decode_unittests: decode_unittests.o pb_decode.o unittestproto.pb.o
encode_unittests: encode_unittests.o pb_encode.o unittestproto.pb.o
test_no_messages: no_messages.pb.h no_messages.pb.c no_messages.pb.o
test_funny_name: funny-proto+name.pb.h funny-proto+name.pb.o
bc_encode: bc_alltypes.pb.o pb_encode.o bc_encode.o
bc_decode: bc_alltypes.pb.o pb_decode.o bc_decode.o
%.pb: %.proto
protoc -I. -I../generator -I/usr/include -o$@ $<
%.pb.c %.pb.h: %.pb ../generator/nanopb_generator.py
python ../generator/nanopb_generator.py $<
breakpoints: ../*.c *.c
grep -n 'return false;' $^ | cut -d: -f-2 | xargs -n 1 echo b > $@
coverage: run_unittests
gcov pb_encode.gcda
gcov pb_decode.gcda
run_unittests: $(TESTS)
rm -f *.gcda
./decode_unittests > /dev/null
./encode_unittests > /dev/null
[ "`./test_encode1 | ./test_decode1`" = \
"`./test_encode1 | protoc --decode=Person -I. -I../generator -I/usr/include person.proto`" ]
[ "`./test_encode2 | ./test_decode1`" = \
"`./test_encode2 | protoc --decode=Person -I. -I../generator -I/usr/include person.proto`" ]
[ "`./test_encode2 | ./test_decode2`" = \
"`./test_encode2 | protoc --decode=Person -I. -I../generator -I/usr/include person.proto`" ]
[ "`./test_decode2 < person_with_extra_field.pb`" = \
"`./test_encode2 | ./test_decode2`" ]
[ "`./test_encode_callbacks | ./test_decode_callbacks`" = \
"`./test_encode_callbacks | protoc --decode=TestMessage callbacks.proto`" ]
./test_encode3 | ./test_decode3
./test_encode3 1 | ./test_decode3 1
./test_encode3 1 | protoc --decode=AllTypes -I. -I../generator -I/usr/include alltypes.proto >/dev/null
./test_encode3_buf 1 | ./test_decode3_buf 1
./bc_encode | ./bc_decode
./test_missing_fields
test_options: options.pb.h options.expected options.pb.o
cat options.expected | while read -r p; do \
if ! grep -q "$$p" $<; then \
echo Expected: "$$p"; \
exit 1; \
fi \
done
run_fuzztest: test_decode3
bash -c 'ulimit -c unlimited; I=1; while true; do cat /dev/urandom | ./test_decode3 > /dev/null; I=$$(($$I+1)); echo -en "\r$$I"; done'

@@ -1,112 +0,0 @@
Help('''
Type 'scons' to build and run all the available test cases.
It will automatically detect your platform and C compiler and
build appropriately.
You can modify the behaviour using the following options:
CC Name of C compiler
CXX Name of C++ compiler
CCFLAGS Flags to pass to the C compiler
CXXFLAGS Flags to pass to the C++ compiler
For example, for a clang build, use:
scons CC=clang CXX=clang++
''')
import os
env = Environment(ENV = os.environ)
# Allow overriding the compiler with scons CC=???
if 'CC' in ARGUMENTS: env.Replace(CC = ARGUMENTS['CC'])
if 'CXX' in ARGUMENTS: env.Replace(CXX = ARGUMENTS['CXX'])
if 'CFLAGS' in ARGUMENTS: env.Append(CCFLAGS = ARGUMENTS['CFLAGS'])
if 'CXXFLAGS' in ARGUMENTS: env.Append(CCFLAGS = ARGUMENTS['CXXFLAGS'])
# Add the builders defined in site_init.py
add_nanopb_builders(env)
# Path to the files shared by tests, and to the nanopb core.
env.Append(CPPPATH = ["#../", "#common"])
# Path for finding nanopb.proto
env.Append(PROTOCPATH = '#../generator')
# Check the compilation environment, unless we are just cleaning up.
if not env.GetOption('clean'):
conf = Configure(env)
# If the platform doesn't support C99, use our own header file instead.
stdbool = conf.CheckCHeader('stdbool.h')
stdint = conf.CheckCHeader('stdint.h')
stddef = conf.CheckCHeader('stddef.h')
string = conf.CheckCHeader('string.h')
if not stdbool or not stdint or not stddef or not string:
conf.env.Append(CPPDEFINES = {'PB_SYSTEM_HEADER': '\\"pb_syshdr.h\\"'})
conf.env.Append(CPPPATH = "#../compat")
if stdbool: conf.env.Append(CPPDEFINES = {'HAVE_STDBOOL_H': 1})
if stdint: conf.env.Append(CPPDEFINES = {'HAVE_STDINT_H': 1})
if stddef: conf.env.Append(CPPDEFINES = {'HAVE_STDDEF_H': 1})
if string: conf.env.Append(CPPDEFINES = {'HAVE_STRING_H': 1})
# Check if we can use pkg-config to find protobuf include path
status, output = conf.TryAction('pkg-config protobuf --variable=includedir > $TARGET')
if status:
conf.env.Append(PROTOCPATH = output.strip())
else:
conf.env.Append(PROTOCPATH = '/usr/include')
# Check if libmudflap is available (only with GCC)
if 'gcc' in env['CC']:
if conf.CheckLib('mudflap'):
conf.env.Append(CCFLAGS = '-fmudflap')
conf.env.Append(LINKFLAGS = '-lmudflap -fmudflap')
# End the config stuff
env = conf.Finish()
# Initialize the CCFLAGS according to the compiler
if 'gcc' in env['CC']:
# GNU Compiler Collection
# Debug info, warnings as errors
env.Append(CFLAGS = '-ansi -pedantic -g -O0 -Wall -Werror --coverage -fstack-protector-all')
env.Append(LINKFLAGS = '--coverage')
# We currently need uint64_t anyway, even though ANSI C90 otherwise..
env.Append(CFLAGS = '-Wno-long-long')
# More strict checks on the nanopb core
env.Append(CORECFLAGS = '-Wextra -Wcast-qual -Wlogical-op -Wconversion')
elif 'clang' in env['CC']:
# CLang
env.Append(CFLAGS = '-ansi -g -O0 -Wall -Werror')
env.Append(CORECFLAGS = ' -Wextra -Wcast-qual -Wconversion')
elif 'cl' in env['CC']:
# Microsoft Visual C++
# Debug info on, warning level 2 for tests, warnings as errors
env.Append(CFLAGS = '/Zi /W2 /WX')
env.Append(LINKFLAGS = '/DEBUG')
# More strict checks on the nanopb core
env.Append(CORECFLAGS = '/W4')
# PB_RETURN_ERROR triggers C4127 because of while(0)
env.Append(CFLAGS = '/wd4127')
elif 'tcc' in env['CC']:
# Tiny C Compiler
env.Append(CFLAGS = '-Wall -Werror -g')
env.SetDefault(CORECFLAGS = '')
if 'clang++' in env['CXX']:
env.Append(CXXFLAGS = '-g -O0 -Wall -Werror -Wextra -Wno-missing-field-initializers')
elif 'g++' in env['CXX']:
env.Append(CXXFLAGS = '-g -O0 -Wall -Werror -Wextra -Wno-missing-field-initializers')
elif 'cl' in env['CXX']:
env.Append(CXXFLAGS = '/Zi /W2 /WX')
# Now include the SConscript files from all subdirectories
SConscript(Glob('*/SConscript'), exports = 'env')

@@ -86,8 +86,5 @@ message AllTypes {
     // Just to make sure that the size of the fields has been calculated
     // properly, i.e. otherwise a bug in last field might not be detected.
     required int32 end = 99;
 
-    extensions 200 to 255;
 }

@@ -1,12 +0,0 @@
# Build and run a test that encodes and decodes a message that contains
# all of the Protocol Buffers data types.
Import("env")
env.NanopbProto("alltypes")
enc = env.Program(["encode_alltypes.c", "alltypes.pb.c", "#common/pb_encode.o"])
dec = env.Program(["decode_alltypes.c", "alltypes.pb.c", "#common/pb_decode.o"])
env.RunTest(enc)
env.RunTest([dec, "encode_alltypes.output"])

@@ -1,11 +0,0 @@
# Check that the old generated .pb.c/.pb.h files are still compatible with the
# current version of nanopb.
Import("env")
enc = env.Program(["encode_legacy.c", "alltypes_legacy.c", "#common/pb_encode.o"])
dec = env.Program(["decode_legacy.c", "alltypes_legacy.c", "#common/pb_decode.o"])
env.RunTest(enc)
env.RunTest([dec, "encode_legacy.output"])

@@ -1,12 +0,0 @@
# Build and run a basic round-trip test using memory buffer encoding.
Import("env")
enc = env.Program(["encode_buffer.c", "#common/person.pb.c", "#common/pb_encode.o"])
dec = env.Program(["decode_buffer.c", "#common/person.pb.c", "#common/pb_decode.o"])
env.RunTest(enc)
env.RunTest([dec, "encode_buffer.output"])
env.Decode(["encode_buffer.output", "#common/person.proto"], MESSAGE = "Person")
env.Compare(["decode_buffer.output", "encode_buffer.decoded"])

@@ -1,12 +0,0 @@
# Build and run a basic round-trip test using direct stream encoding.
Import("env")
enc = env.Program(["encode_stream.c", "#common/person.pb.c", "#common/pb_encode.o"])
dec = env.Program(["decode_stream.c", "#common/person.pb.c", "#common/pb_decode.o"])
env.RunTest(enc)
env.RunTest([dec, "encode_stream.output"])
env.Decode(["encode_stream.output", "#common/person.proto"], MESSAGE = "Person")
env.Compare(["decode_stream.output", "encode_stream.decoded"])

@@ -5,7 +5,7 @@
  * incompatible changes made to the generator in future versions.
  */
 
-#include "alltypes_legacy.h"
+#include "bc_alltypes.pb.h"
 
 const char SubMessage_substuff1_default[16] = "1";
 const int32_t SubMessage_substuff2_default = 2;

@@ -1,16 +1,16 @@
 /* Tests the decoding of all types.
- * This is a backwards-compatibility test, using alltypes_legacy.h.
- * It is similar to decode_alltypes, but duplicated in order to allow
- * decode_alltypes to test any new features introduced later.
+ * This is a backwards-compatibility test, using bc_alltypes.pb.h.
+ * It is similar to test_decode3, but duplicated in order to allow
+ * test_decode3 to test any new features introduced later.
  *
- * Run e.g. ./encode_legacy | ./decode_legacy
+ * Run e.g. ./bc_encode | ./bc_decode
  */
 
 #include <stdio.h>
 #include <string.h>
 #include <stdlib.h>
 #include <pb_decode.h>
-#include "alltypes_legacy.h"
+#include "bc_alltypes.pb.h"
 
 #define TEST(x) if (!(x)) { \
     printf("Test " #x " failed.\n"); \

@@ -1,15 +1,14 @@
 /* Attempts to test all the datatypes supported by ProtoBuf.
- * This is a backwards-compatibility test, using alltypes_legacy.h.
- * It is similar to encode_alltypes, but duplicated in order to allow
- * encode_alltypes to test any new features introduced later.
+ * This is a backwards-compatibility test, using bc_alltypes.pb.h.
+ * It is similar to test_encode3, but duplicated in order to allow
+ * test_encode3 to test any new features introduced later.
  */
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <pb_encode.h>
-#include "alltypes_legacy.h"
-#include "test_helpers.h"
+#include "bc_alltypes.pb.h"
 
 int main(int argc, char **argv)
 {
@@ -115,21 +114,18 @@ int main(int argc, char **argv)
     alltypes.end = 1099;
 
-    {
-    uint8_t buffer[1024];
-    pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+    uint8_t buffer[1024];
+    pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
 
     /* Now encode it and check if we succeeded. */
     if (pb_encode(&stream, AllTypes_fields, &alltypes))
     {
-        SET_BINARY_MODE(stdout);
-        fwrite(buffer, 1, stream.bytes_written, stdout);
-        return 0; /* Success */
-    }
-    else
-    {
-        fprintf(stderr, "Encoding failed!\n");
-        return 1; /* Failure */
-    }
+        fwrite(buffer, 1, stream.bytes_written, stdout);
+        return 0; /* Success */
+    }
+    else
+    {
+        fprintf(stderr, "Encoding failed!\n");
+        return 1; /* Failure */
     }
 }

@@ -1,23 +0,0 @@
# Run the alltypes test case, but compile with PB_BUFFER_ONLY=1
Import("env")
# Take copy of the files for custom build.
c = Copy("$TARGET", "$SOURCE")
env.Command("pb_encode.c", "#../pb_encode.c", c)
env.Command("pb_decode.c", "#../pb_decode.c", c)
env.Command("alltypes.pb.h", "#alltypes/alltypes.pb.h", c)
env.Command("alltypes.pb.c", "#alltypes/alltypes.pb.c", c)
env.Command("encode_alltypes.c", "#alltypes/encode_alltypes.c", c)
env.Command("decode_alltypes.c", "#alltypes/decode_alltypes.c", c)
# Define the compilation options
opts = env.Clone()
opts.Append(CPPDEFINES = {'PB_BUFFER_ONLY': 1})
# Now build and run the test normally.
enc = opts.Program(["encode_alltypes.c", "alltypes.pb.c", "pb_encode.c"])
dec = opts.Program(["decode_alltypes.c", "alltypes.pb.c", "pb_decode.c"])
env.RunTest(enc)
env.RunTest([dec, "encode_alltypes.output"])

@@ -11,6 +11,5 @@ message TestMessage {
     repeated fixed32 fixed32value = 3;
     repeated fixed64 fixed64value = 4;
     optional SubMessage submsg = 5;
-    repeated string repeatedstring = 6;
 }
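For context, the repeatedstring field removed above is a callback field on the nanopb side; an encode callback for it would look roughly like the sketch below (not part of the diff). The string list and the function name encode_repeatedstring are invented, and the newer void* const *arg callback signature is assumed.

    #include <string.h>
    #include <pb_encode.h>
    #include "callbacks.pb.h"

    /* Hypothetical data source for this sketch. */
    static const char *strings[] = { "first", "second" };

    /* Encode callback for a repeated string field: write the field tag
     * before every element (pb_encode_tag_for_field), then the payload. */
    static bool encode_repeatedstring(pb_ostream_t *stream, const pb_field_t *field,
                                      void * const *arg)
    {
        size_t i;
        (void)arg; /* the data comes from the static array in this sketch */

        for (i = 0; i < sizeof(strings) / sizeof(strings[0]); i++)
        {
            if (!pb_encode_tag_for_field(stream, field))
                return false;
            if (!pb_encode_string(stream, (const uint8_t*)strings[i], strlen(strings[i])))
                return false;
        }
        return true;
    }

    /* Hooked up as e.g.: msg.repeatedstring.funcs.encode = &encode_repeatedstring; */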

@@ -1,14 +0,0 @@
# Test the functionality of the callback fields.
Import("env")
env.NanopbProto("callbacks")
enc = env.Program(["encode_callbacks.c", "callbacks.pb.c", "#common/pb_encode.o"])
dec = env.Program(["decode_callbacks.c", "callbacks.pb.c", "#common/pb_decode.o"])
env.RunTest(enc)
env.RunTest([dec, "encode_callbacks.output"])
env.Decode(["encode_callbacks.output", "callbacks.proto"], MESSAGE = "TestMessage")
env.Compare(["decode_callbacks.output", "encode_callbacks.decoded"])

@@ -1,17 +0,0 @@
# Build the common files needed by multiple test cases
Import('env')
# Protocol definitions for the encode/decode_unittests
env.NanopbProto("unittestproto")
# Protocol definitions for basic_buffer/stream tests
env.NanopbProto("person")
# Binaries of the pb_decode.c and pb_encode.c
# These are built using more strict warning flags.
strict = env.Clone()
strict.Append(CFLAGS = strict['CORECFLAGS'])
strict.Object("pb_decode.o", "#../pb_decode.c")
strict.Object("pb_encode.o", "#../pb_encode.c")

@@ -1,17 +0,0 @@
/* Compatibility helpers for the test programs. */
#ifndef _TEST_HELPERS_H_
#define _TEST_HELPERS_H_
#ifdef _WIN32
#include <io.h>
#include <fcntl.h>
#define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY)
#else
#define SET_BINARY_MODE(file)
#endif
#endif

@@ -1,20 +0,0 @@
# Run the alltypes test case, but compile it as C++ instead.
# In fact, compile the entire nanopb using C++ compiler.
Import("env")
# Copy the files to .cxx extension in order to force C++ build.
c = Copy("$TARGET", "$SOURCE")
env.Command("pb_encode.cxx", "#../pb_encode.c", c)
env.Command("pb_decode.cxx", "#../pb_decode.c", c)
env.Command("alltypes.pb.h", "#alltypes/alltypes.pb.h", c)
env.Command("alltypes.pb.cxx", "#alltypes/alltypes.pb.c", c)
env.Command("encode_alltypes.cxx", "#alltypes/encode_alltypes.c", c)
env.Command("decode_alltypes.cxx", "#alltypes/decode_alltypes.c", c)
# Now build and run the test normally.
enc = env.Program(["encode_alltypes.cxx", "alltypes.pb.cxx", "pb_encode.cxx"])
dec = env.Program(["decode_alltypes.cxx", "alltypes.pb.cxx", "pb_decode.cxx"])
env.RunTest(enc)
env.RunTest([dec, "encode_alltypes.output"])

@@ -289,16 +289,6 @@ int main()
         TEST((s = S("\x08"), !pb_decode(&s, IntegerArray_fields, &dest)))
     }
 
-    {
-        pb_istream_t s;
-        IntegerContainer dest = {{0}};
-
-        COMMENT("Testing pb_decode_delimited")
-        TEST((s = S("\x09\x0A\x07\x0A\x05\x01\x02\x03\x04\x05"),
-              pb_decode_delimited(&s, IntegerContainer_fields, &dest)) &&
-             dest.submsg.data_count == 5)
-    }
-
     if (status != 0)
         fprintf(stdout, "\n\nSome tests FAILED!\n");

@@ -1,4 +0,0 @@
Import('env')
p = env.Program(["decode_unittests.c", "#common/unittestproto.pb.c", "#common/pb_decode.o"])
env.RunTest(p)

@@ -180,14 +180,12 @@ int main()
     {
         uint8_t buffer[30];
         pb_ostream_t s;
-        char value[30] = "xyzzy";
+        char value[] = "xyzzy";
 
         COMMENT("Test pb_enc_string")
-        TEST(WRITES(pb_enc_string(&s, &StringMessage_fields[0], &value), "\x05xyzzy"))
+        TEST(WRITES(pb_enc_string(&s, NULL, &value), "\x05xyzzy"))
         value[0] = '\0';
-        TEST(WRITES(pb_enc_string(&s, &StringMessage_fields[0], &value), "\x00"))
-        memset(value, 'x', 30);
-        TEST(WRITES(pb_enc_string(&s, &StringMessage_fields[0], &value), "\x0Axxxxxxxxxx"))
+        TEST(WRITES(pb_enc_string(&s, NULL, &value), "\x00"))
     }
 
     {
@@ -244,16 +242,6 @@ int main()
                     "\x0A\x07\x0A\x05\x01\x02\x03\x04\x05"))
     }
 
-    {
-        uint8_t buffer[20];
-        pb_ostream_t s;
-        IntegerContainer msg = {{5, {1,2,3,4,5}}};
-
-        COMMENT("Test pb_encode_delimited.")
-        TEST(WRITES(pb_encode_delimited(&s, IntegerContainer_fields, &msg),
-                    "\x09\x0A\x07\x0A\x05\x01\x02\x03\x04\x05"))
-    }
-
     {
         uint8_t buffer[10];
         pb_ostream_t s;

@@ -1,5 +0,0 @@
# Build and run the stand-alone unit tests for the nanopb encoder part.
Import('env')
p = env.Program(["encode_unittests.c", "#common/unittestproto.pb.c", "#common/pb_encode.o"])
env.RunTest(p)

View File

@@ -1,16 +0,0 @@
# Test the support for extension fields.
Import("env")
# We use the files from the alltypes test case
incpath = env.Clone()
incpath.Append(PROTOCPATH = '#alltypes')
incpath.Append(CPPPATH = '#alltypes')
incpath.NanopbProto("extensions")
enc = incpath.Program(["encode_extensions.c", "extensions.pb.c", "#alltypes/alltypes.pb$OBJSUFFIX", "#common/pb_encode.o"])
dec = incpath.Program(["decode_extensions.c", "extensions.pb.c", "#alltypes/alltypes.pb$OBJSUFFIX", "#common/pb_decode.o"])
env.RunTest(enc)
env.RunTest([dec, "encode_extensions.output"])

@@ -1,58 +0,0 @@
/* Test decoding of extension fields. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <pb_decode.h>
#include "alltypes.pb.h"
#include "extensions.pb.h"
#include "test_helpers.h"
#define TEST(x) if (!(x)) { \
printf("Test " #x " failed.\n"); \
return 2; \
}
int main(int argc, char **argv)
{
uint8_t buffer[1024];
size_t count;
pb_istream_t stream;
AllTypes alltypes = {0};
int32_t extensionfield1;
pb_extension_t ext1;
ExtensionMessage extensionfield2;
pb_extension_t ext2;
/* Read the message data */
SET_BINARY_MODE(stdin);
count = fread(buffer, 1, sizeof(buffer), stdin);
stream = pb_istream_from_buffer(buffer, count);
/* Add the extensions */
alltypes.extensions = &ext1;
ext1.type = &AllTypes_extensionfield1;
ext1.dest = &extensionfield1;
ext1.next = &ext2;
ext2.type = &ExtensionMessage_AllTypes_extensionfield2;
ext2.dest = &extensionfield2;
ext2.next = NULL;
/* Decode the message */
if (!pb_decode(&stream, AllTypes_fields, &alltypes))
{
printf("Parsing failed: %s\n", PB_GET_ERROR(&stream));
return 1;
}
/* Check that the extensions decoded properly */
TEST(extensionfield1 == 12345)
TEST(strcmp(extensionfield2.test1, "test") == 0)
TEST(extensionfield2.test2 == 54321)
return 0;
}

@@ -1,50 +0,0 @@
/* Tests extension fields.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pb_encode.h>
#include "alltypes.pb.h"
#include "extensions.pb.h"
#include "test_helpers.h"
int main(int argc, char **argv)
{
uint8_t buffer[1024];
pb_ostream_t stream;
AllTypes alltypes = {0};
int32_t extensionfield1 = 12345;
pb_extension_t ext1;
ExtensionMessage extensionfield2 = {"test", 54321};
pb_extension_t ext2;
/* Set up the extensions */
alltypes.extensions = &ext1;
ext1.type = &AllTypes_extensionfield1;
ext1.dest = &extensionfield1;
ext1.next = &ext2;
ext2.type = &ExtensionMessage_AllTypes_extensionfield2;
ext2.dest = &extensionfield2;
ext2.next = NULL;
/* Set up the output stream */
stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
/* Now encode the message and check if we succeeded. */
if (pb_encode(&stream, AllTypes_fields, &alltypes))
{
SET_BINARY_MODE(stdout);
fwrite(buffer, 1, stream.bytes_written, stdout);
return 0; /* Success */
}
else
{
fprintf(stderr, "Encoding failed: %s\n", PB_GET_ERROR(&stream));
return 1; /* Failure */
}
}

@@ -1 +0,0 @@
* max_size:16

@@ -1,17 +0,0 @@
import 'alltypes.proto';
extend AllTypes {
optional int32 AllTypes_extensionfield1 = 255;
}
message ExtensionMessage {
extend AllTypes {
optional ExtensionMessage AllTypes_extensionfield2 = 254;
required ExtensionMessage AllTypes_extensionfield3 = 253;
repeated ExtensionMessage AllTypes_extensionfield4 = 252;
}
required string test1 = 1;
required int32 test2 = 2;
}

@@ -1,14 +0,0 @@
# Test that the decoder properly handles unknown fields in the input.
Import("env")
dec = env.GetBuildPath('#basic_buffer/${PROGPREFIX}decode_buffer${PROGSUFFIX}')
env.RunTest('person_with_extra_field.output', [dec, "person_with_extra_field.pb"])
env.Compare(["person_with_extra_field.output", "person_with_extra_field.expected"])
dec = env.GetBuildPath('#basic_stream/${PROGPREFIX}decode_stream${PROGSUFFIX}')
env.RunTest('person_with_extra_field_stream.output', [dec, "person_with_extra_field.pb"])
env.Compare(["person_with_extra_field_stream.output", "person_with_extra_field.expected"])
dec2 = env.GetBuildPath('#alltypes/${PROGPREFIX}decode_alltypes${PROGSUFFIX}')
env.RunTest('alltypes_with_extra_fields.output', [dec2, 'alltypes_with_extra_fields.pb'])

@@ -1,14 +0,0 @@
name: "Test Person 99"
id: 99
email: "test@person.com"
phone {
number: "555-12345678"
type: MOBILE
}
phone {
number: "99-2342"
}
phone {
number: "1234-5678"
type: WORK
}

@@ -1,24 +0,0 @@
# Run the alltypes test case, but compile with PB_FIELD_16BIT=1.
# Also the .proto file has been modified to have high indexes.
Import("env")
# Take copy of the files for custom build.
c = Copy("$TARGET", "$SOURCE")
env.Command("pb_encode.c", "#../pb_encode.c", c)
env.Command("pb_decode.c", "#../pb_decode.c", c)
env.Command("encode_alltypes.c", "#alltypes/encode_alltypes.c", c)
env.Command("decode_alltypes.c", "#alltypes/decode_alltypes.c", c)
env.NanopbProto("alltypes")
# Define the compilation options
opts = env.Clone()
opts.Append(CPPDEFINES = {'PB_FIELD_16BIT': 1})
# Now build and run the test normally.
enc = opts.Program(["encode_alltypes.c", "alltypes.pb.c", "pb_encode.c"])
dec = opts.Program(["decode_alltypes.c", "alltypes.pb.c", "pb_decode.c"])
env.RunTest(enc)
env.RunTest([dec, "encode_alltypes.output"])

@@ -1,3 +0,0 @@
* max_size:16
* max_count:5

@@ -1,90 +0,0 @@
message SubMessage {
required string substuff1 = 1 [default = "1"];
required int32 substuff2 = 2 [default = 2];
optional fixed32 substuff3 = 65535 [default = 3];
}
message EmptyMessage {
}
enum MyEnum {
Zero = 0;
First = 1;
Second = 2;
Truth = 42;
}
message AllTypes {
required int32 req_int32 = 1;
required int64 req_int64 = 2;
required uint32 req_uint32 = 3;
required uint64 req_uint64 = 4;
required sint32 req_sint32 = 5;
required sint64 req_sint64 = 6;
required bool req_bool = 7;
required fixed32 req_fixed32 = 8;
required sfixed32 req_sfixed32= 9;
required float req_float = 10;
required fixed64 req_fixed64 = 11;
required sfixed64 req_sfixed64= 12;
required double req_double = 13;
required string req_string = 14;
required bytes req_bytes = 15;
required SubMessage req_submsg = 16;
required MyEnum req_enum = 17;
required EmptyMessage req_emptymsg = 18;
repeated int32 rep_int32 = 21;
repeated int64 rep_int64 = 22;
repeated uint32 rep_uint32 = 23;
repeated uint64 rep_uint64 = 24;
repeated sint32 rep_sint32 = 25;
repeated sint64 rep_sint64 = 26;
repeated bool rep_bool = 27;
repeated fixed32 rep_fixed32 = 28;
repeated sfixed32 rep_sfixed32= 29;
repeated float rep_float = 30;
repeated fixed64 rep_fixed64 = 10031;
repeated sfixed64 rep_sfixed64= 10032;
repeated double rep_double = 10033;
repeated string rep_string = 10034;
repeated bytes rep_bytes = 10035;
repeated SubMessage rep_submsg = 10036;
repeated MyEnum rep_enum = 10037;
repeated EmptyMessage rep_emptymsg = 10038;
optional int32 opt_int32 = 10041 [default = 4041];
optional int64 opt_int64 = 10042 [default = 4042];
optional uint32 opt_uint32 = 10043 [default = 4043];
optional uint64 opt_uint64 = 10044 [default = 4044];
optional sint32 opt_sint32 = 10045 [default = 4045];
optional sint64 opt_sint64 = 10046 [default = 4046];
optional bool opt_bool = 10047 [default = false];
optional fixed32 opt_fixed32 = 10048 [default = 4048];
optional sfixed32 opt_sfixed32= 10049 [default = 4049];
optional float opt_float = 10050 [default = 4050];
optional fixed64 opt_fixed64 = 10051 [default = 4051];
optional sfixed64 opt_sfixed64= 10052 [default = 4052];
optional double opt_double = 10053 [default = 4053];
optional string opt_string = 10054 [default = "4054"];
optional bytes opt_bytes = 10055 [default = "4055"];
optional SubMessage opt_submsg = 10056;
optional MyEnum opt_enum = 10057 [default = Second];
optional EmptyMessage opt_emptymsg = 10058;
// Just to make sure that the size of the fields has been calculated
// properly, i.e. otherwise a bug in last field might not be detected.
required int32 end = 10099;
}

@@ -1,24 +0,0 @@
# Run the alltypes test case, but compile with PB_FIELD_32BIT=1.
# Also the .proto file has been modified to have high indexes.
Import("env")
# Take copy of the files for custom build.
c = Copy("$TARGET", "$SOURCE")
env.Command("pb_encode.c", "#../pb_encode.c", c)
env.Command("pb_decode.c", "#../pb_decode.c", c)
env.Command("encode_alltypes.c", "#alltypes/encode_alltypes.c", c)
env.Command("decode_alltypes.c", "#alltypes/decode_alltypes.c", c)
env.NanopbProto("alltypes")
# Define the compilation options
opts = env.Clone()
opts.Append(CPPDEFINES = {'PB_FIELD_32BIT': 1})
# Now build and run the test normally.
enc = opts.Program(["encode_alltypes.c", "alltypes.pb.c", "pb_encode.c"])
dec = opts.Program(["decode_alltypes.c", "alltypes.pb.c", "pb_decode.c"])
env.RunTest(enc)
env.RunTest([dec, "encode_alltypes.output"])

@@ -1,3 +0,0 @@
* max_size:16
* max_count:5

@@ -1,90 +0,0 @@
message SubMessage {
required string substuff1 = 1 [default = "1"];
required int32 substuff2 = 2 [default = 2];
optional fixed32 substuff3 = 12365535 [default = 3];
}
message EmptyMessage {
}
enum MyEnum {
Zero = 0;
First = 1;
Second = 2;
Truth = 42;
}
message AllTypes {
required int32 req_int32 = 1;
required int64 req_int64 = 2;
required uint32 req_uint32 = 3;
required uint64 req_uint64 = 4;
required sint32 req_sint32 = 5;
required sint64 req_sint64 = 6;
required bool req_bool = 7;
required fixed32 req_fixed32 = 8;
required sfixed32 req_sfixed32= 9;
required float req_float = 10;
required fixed64 req_fixed64 = 11;
required sfixed64 req_sfixed64= 12;
required double req_double = 13;
required string req_string = 14;
required bytes req_bytes = 15;
required SubMessage req_submsg = 16;
required MyEnum req_enum = 17;
required EmptyMessage req_emptymsg = 18;
repeated int32 rep_int32 = 21;
repeated int64 rep_int64 = 22;
repeated uint32 rep_uint32 = 23;
repeated uint64 rep_uint64 = 24;
repeated sint32 rep_sint32 = 25;
repeated sint64 rep_sint64 = 26;
repeated bool rep_bool = 27;
repeated fixed32 rep_fixed32 = 28;
repeated sfixed32 rep_sfixed32= 29;
repeated float rep_float = 30;
repeated fixed64 rep_fixed64 = 10031;
repeated sfixed64 rep_sfixed64= 10032;
repeated double rep_double = 10033;
repeated string rep_string = 10034;
repeated bytes rep_bytes = 10035;
repeated SubMessage rep_submsg = 10036;
repeated MyEnum rep_enum = 10037;
repeated EmptyMessage rep_emptymsg = 10038;
optional int32 opt_int32 = 10041 [default = 4041];
optional int64 opt_int64 = 10042 [default = 4042];
optional uint32 opt_uint32 = 10043 [default = 4043];
optional uint64 opt_uint64 = 10044 [default = 4044];
optional sint32 opt_sint32 = 10045 [default = 4045];
optional sint64 opt_sint64 = 10046 [default = 4046];
optional bool opt_bool = 10047 [default = false];
optional fixed32 opt_fixed32 = 10048 [default = 4048];
optional sfixed32 opt_sfixed32= 10049 [default = 4049];
optional float opt_float = 10050 [default = 4050];
optional fixed64 opt_fixed64 = 10051 [default = 4051];
optional sfixed64 opt_sfixed64= 10052 [default = 4052];
optional double opt_double = 10053 [default = 4053];
optional string opt_string = 10054 [default = "4054"];
optional bytes opt_bytes = 10055 [default = "4055"];
optional SubMessage opt_submsg = 10056;
optional MyEnum opt_enum = 10057 [default = Second];
optional EmptyMessage opt_emptymsg = 10058;
// Just to make sure that the size of the fields has been calculated
// properly, i.e. otherwise a bug in last field might not be detected.
required int32 end = 13432099;
}

@@ -1,8 +0,0 @@
# Check that the decoder properly detects when required fields are missing.
Import("env")
env.NanopbProto("missing_fields")
test = env.Program(["missing_fields.c", "missing_fields.pb.c", "#common/pb_encode.o", "#common/pb_decode.o"])
env.RunTest(test)

@@ -1,13 +0,0 @@
# Test that multiple .proto files don't cause name collisions.
Import("env")
incpath = env.Clone()
incpath.Append(PROTOCPATH = '#multiple_files')
incpath.NanopbProto("callbacks")
incpath.NanopbProto("callbacks2")
test = incpath.Program(["test_multiple_files.c", "callbacks.pb.c", "callbacks2.pb.c"])
env.RunTest(test)

@@ -1,16 +0,0 @@
message SubMessage {
optional string stringvalue = 1;
repeated int32 int32value = 2;
repeated fixed32 fixed32value = 3;
repeated fixed64 fixed64value = 4;
}
message TestMessage {
optional string stringvalue = 1;
repeated int32 int32value = 2;
repeated fixed32 fixed32value = 3;
repeated fixed64 fixed64value = 4;
optional SubMessage submsg = 5;
repeated string repeatedstring = 6;
}

@@ -1,12 +0,0 @@
/*
* Tests if this still compiles when multiple .proto files are involved.
*/
#include <stdio.h>
#include <pb_encode.h>
#include "callbacks2.pb.h"
int main()
{
return 0;
}

@@ -1,23 +0,0 @@
# Run the alltypes test case, but compile with PB_NO_ERRMSG=1
Import("env")
# Take copy of the files for custom build.
c = Copy("$TARGET", "$SOURCE")
env.Command("pb_encode.c", "#../pb_encode.c", c)
env.Command("pb_decode.c", "#../pb_decode.c", c)
env.Command("alltypes.pb.h", "#alltypes/alltypes.pb.h", c)
env.Command("alltypes.pb.c", "#alltypes/alltypes.pb.c", c)
env.Command("encode_alltypes.c", "#alltypes/encode_alltypes.c", c)
env.Command("decode_alltypes.c", "#alltypes/decode_alltypes.c", c)
# Define the compilation options
opts = env.Clone()
opts.Append(CPPDEFINES = {'PB_NO_ERRMSG': 1})
# Now build and run the test normally.
enc = opts.Program(["encode_alltypes.c", "alltypes.pb.c", "pb_encode.c"])
dec = opts.Program(["decode_alltypes.c", "alltypes.pb.c", "pb_decode.c"])
env.RunTest(enc)
env.RunTest([dec, "encode_alltypes.output"])

@@ -1,7 +0,0 @@
# Test that a .proto file without any messages compiles fine.
Import("env")
env.NanopbProto("no_messages")
env.Object('no_messages.pb.c')

@@ -1,9 +0,0 @@
# Test that the generator options work as expected.
Import("env")
env.NanopbProto("options")
env.Object('options.pb.c')
env.Match(['options.pb.h', 'options.expected'])

@@ -1,114 +0,0 @@
import subprocess
import sys
import re
try:
# Make terminal colors work on windows
import colorama
colorama.init()
except ImportError:
pass
def add_nanopb_builders(env):
'''Add the necessary builder commands for nanopb tests.'''
# Build command for building .pb from .proto using protoc
def proto_actions(source, target, env, for_signature):
esc = env['ESCAPE']
dirs = ' '.join(['-I' + esc(env.GetBuildPath(d)) for d in env['PROTOCPATH']])
return '$PROTOC $PROTOCFLAGS %s -o%s %s' % (dirs, esc(str(target[0])), esc(str(source[0])))
proto_file_builder = Builder(generator = proto_actions,
suffix = '.pb',
src_suffix = '.proto')
env.Append(BUILDERS = {'Proto': proto_file_builder})
env.SetDefault(PROTOC = 'protoc')
env.SetDefault(PROTOCPATH = ['.'])
# Build command for running nanopb generator
import os.path
def nanopb_targets(target, source, env):
basename = os.path.splitext(str(source[0]))[0]
target.append(basename + '.pb.h')
return target, source
nanopb_file_builder = Builder(action = '$NANOPB_GENERATOR $NANOPB_FLAGS $SOURCE',
suffix = '.pb.c',
src_suffix = '.pb',
emitter = nanopb_targets)
env.Append(BUILDERS = {'Nanopb': nanopb_file_builder})
gen_path = env['ESCAPE'](env.GetBuildPath("#../generator/nanopb_generator.py"))
env.SetDefault(NANOPB_GENERATOR = 'python ' + gen_path)
env.SetDefault(NANOPB_FLAGS = '-q')
# Combined method to run both protoc and nanopb generator
def run_protoc_and_nanopb(env, source):
b1 = env.Proto(source)
b2 = env.Nanopb(source)
return b1 + b2
env.AddMethod(run_protoc_and_nanopb, "NanopbProto")
# Build command that runs a test program and saves the output
def run_test(target, source, env):
if len(source) > 1:
infile = open(str(source[1]))
else:
infile = None
pipe = subprocess.Popen(str(source[0]),
stdin = infile,
stdout = open(str(target[0]), 'w'),
stderr = sys.stderr)
result = pipe.wait()
if result == 0:
print '\033[32m[ OK ]\033[0m Ran ' + str(source[0])
else:
print '\033[31m[FAIL]\033[0m Program ' + str(source[0]) + ' returned ' + str(result)
return result
run_test_builder = Builder(action = run_test,
suffix = '.output')
env.Append(BUILDERS = {'RunTest': run_test_builder})
# Build command that decodes a message using protoc
def decode_actions(source, target, env, for_signature):
esc = env['ESCAPE']
dirs = ' '.join(['-I' + esc(env.GetBuildPath(d)) for d in env['PROTOCPATH']])
return '$PROTOC $PROTOCFLAGS %s --decode=%s %s <%s >%s' % (
dirs, env['MESSAGE'], esc(str(source[1])), esc(str(source[0])), esc(str(target[0])))
decode_builder = Builder(generator = decode_actions,
suffix = '.decoded')
env.Append(BUILDERS = {'Decode': decode_builder})
# Build command that asserts that two files be equal
def compare_files(target, source, env):
data1 = open(str(source[0]), 'rb').read()
data2 = open(str(source[1]), 'rb').read()
if data1 == data2:
print '\033[32m[ OK ]\033[0m Files equal: ' + str(source[0]) + ' and ' + str(source[1])
return 0
else:
print '\033[31m[FAIL]\033[0m Files differ: ' + str(source[0]) + ' and ' + str(source[1])
return 1
compare_builder = Builder(action = compare_files,
suffix = '.equal')
env.Append(BUILDERS = {'Compare': compare_builder})
# Build command that checks that each pattern in source2 is found in source1.
def match_files(target, source, env):
data = open(str(source[0]), 'rU').read()
patterns = open(str(source[1]))
for pattern in patterns:
if pattern.strip() and not re.search(pattern.strip(), data, re.MULTILINE):
print '\033[31m[FAIL]\033[0m Pattern not found in ' + str(source[0]) + ': ' + pattern
return 1
else:
print '\033[32m[ OK ]\033[0m All patterns found in ' + str(source[0])
return 0
match_builder = Builder(action = match_files, suffix = '.matched')
env.Append(BUILDERS = {'Match': match_builder})

@@ -1,7 +0,0 @@
# Test that special characters in .proto filenames work.
Import('env')
env.Proto("funny-proto+name has.characters.proto")
env.Nanopb("funny-proto+name has.characters.pb.c", "funny-proto+name has.characters.pb")
env.Object("funny-proto+name has.characters.pb.c")

@@ -9,7 +9,6 @@
 #include <stdio.h>
 #include <pb_decode.h>
 #include "person.pb.h"
-#include "test_helpers.h"
 
 /* This function is called once from main(), it handles
    the decoding and printing. */
@@ -60,13 +59,9 @@ bool print_person(pb_istream_t *stream)
 
 int main()
 {
-    uint8_t buffer[512];
-    pb_istream_t stream;
-    size_t count;
-
     /* Read the data into buffer */
-    SET_BINARY_MODE(stdin);
-    count = fread(buffer, 1, sizeof(buffer), stdin);
+    uint8_t buffer[512];
+    size_t count = fread(buffer, 1, sizeof(buffer), stdin);
 
     if (!feof(stdin))
     {
@@ -75,7 +70,7 @@ int main()
     }
 
     /* Construct a pb_istream_t for reading from the buffer */
-    stream = pb_istream_from_buffer(buffer, count);
+    pb_istream_t stream = pb_istream_from_buffer(buffer, count);
 
     /* Decode and print out the stuff */
     if (!print_person(&stream))

@@ -4,7 +4,6 @@
 #include <stdio.h>
 #include <pb_decode.h>
 #include "person.pb.h"
-#include "test_helpers.h"
 
 /* This function is called once from main(), it handles
    the decoding and printing.
@@ -70,10 +69,10 @@ bool callback(pb_istream_t *stream, uint8_t *buf, size_t count)
 
 int main()
 {
-    pb_istream_t stream = {&callback, NULL, SIZE_MAX};
-    stream.state = stdin;
-    SET_BINARY_MODE(stdin);
+    /* Maximum size is specified to prevent infinite length messages from
+     * hanging this in the fuzz test.
+     */
+    pb_istream_t stream = {&callback, stdin, 10000};
 
     if (!print_person(&stream))
     {
         printf("Parsing failed: %s\n", PB_GET_ERROR(&stream));

Some files were not shown because too many files have changed in this diff.