Compare commits
3 Commits
dev_tests_ ... dev_get_ri

| Author | SHA1 | Date |
|---|---|---|
| | 840e213b9f | |
| | 5b9ad17dc2 | |
| | 4821e7f457 | |
@@ -1,2 +1,5 @@
nanopb_pb2.py: nanopb.proto
	protoc --python_out=. -I /usr/include -I . nanopb.proto

plugin_pb2.py: plugin.proto
	protoc --python_out=. -I /usr/include -I . plugin.proto
168  generator/nanopb_generator.py  Normal file → Executable file
@@ -1,3 +1,5 @@
#!/usr/bin/python

'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
nanopb_version = "nanopb-0.2.3-dev"
@@ -244,13 +246,14 @@ class Field:
'''Return the pb_field_t initializer to use in the constant array.
prev_field_name is the name of the previous field or None.
'''
result = ' PB_FIELD(%3d, ' % self.tag
result = ' PB_FIELD2(%3d, ' % self.tag
result += '%-8s, ' % self.pbtype
result += '%s, ' % self.rules
result += '%s, ' % self.allocation
result += '%s, ' % self.struct_name
result += '%s, ' % self.name
result += '%s, ' % (prev_field_name or self.name)
result += '%s, ' % ("first" if not prev_field_name else "other")

if self.pbtype == 'MESSAGE':
result += '&%s_fields)' % self.submsgname
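The initializer emitted here now carries an explicit "first"/"other" position selector alongside the previous field name, matching the PB_FIELD2 macro added in pb.h below. As a rough sketch of the resulting output (the Example message, its members and the trailing 0 pointer are invented for illustration, not taken from this commit):

```c
#include <pb.h>

/* Hypothetical message struct, as the generator would lay it out. */
typedef struct _Example {
    int32_t value;      /* required int32 value = 1; */
    bool has_name;
    char name[16];      /* optional string name = 2 [max_size = 16]; */
} Example;

const pb_field_t Example_fields[3] = {
    /* First field: position selector "first", previous field is itself. */
    PB_FIELD2(  1, INT32  , REQUIRED, STATIC  , Example, value, value, first, 0),
    /* Later fields: position selector "other", previous field named explicitly. */
    PB_FIELD2(  2, STRING , OPTIONAL, STATIC  , Example, name, value, other, 0),
    PB_LAST_FIELD
};
```

In a real build this array is produced by nanopb_generator.py rather than written by hand.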
@@ -602,7 +605,7 @@ def generate_header(dependencies, headername, enums, messages, extensions, options):
# End of header
yield '\n#endif\n'

def generate_source(headername, enums, messages, extensions):
def generate_source(headername, enums, messages, extensions, options):
'''Generate content for a source file.'''

yield '/* Automatically generated nanopb constant definitions */\n'
@@ -780,73 +783,126 @@ optparser.add_option("-v", "--verbose", dest="verbose", action="store_true", def
optparser.add_option("-s", dest="settings", metavar="OPTION:VALUE", action="append", default=[],
help="Set generator option (max_size, max_count etc.).")

def process(filenames, options):
'''Process the files given on the command line.'''
def process_file(filename, fdesc, options):
'''Process a single file.
filename: The full path to the .proto or .pb source file, as string.
fdesc: The loaded FileDescriptorSet, or None to read from the input file.
options: Command line options as they come from OptionsParser.

Returns a dict:
{'headername': Name of header file,
'headerdata': Data for the .h header file,
'sourcename': Name of the source code file,
'sourcedata': Data for the .c source code file
}
'''
toplevel_options = nanopb_pb2.NanoPBOptions()
for s in options.settings:
text_format.Merge(s, toplevel_options)

if not fdesc:
data = open(filename, 'rb').read()
fdesc = descriptor.FileDescriptorSet.FromString(data).file[0]

# Check if there is a separate .options file
try:
optfilename = options.options_file % os.path.splitext(filename)[0]
except TypeError:
# No %s specified, use the filename as-is
optfilename = options.options_file

if options.verbose:
print 'Reading options from ' + optfilename

if os.path.isfile(optfilename):
Globals.separate_options = read_options_file(open(optfilename, "rU"))
else:
Globals.separate_options = []

# Parse the file
file_options = get_nanopb_suboptions(fdesc, toplevel_options, Names([filename]))
enums, messages, extensions = parse_file(fdesc, file_options)

# Decide the file names
noext = os.path.splitext(filename)[0]
headername = noext + '.' + options.extension + '.h'
sourcename = noext + '.' + options.extension + '.c'
headerbasename = os.path.basename(headername)

# List of .proto files that should not be included in the C header file
# even if they are mentioned in the source .proto.
excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
dependencies = [d for d in fdesc.dependency if d not in excludes]

headerdata = ''.join(generate_header(dependencies, headerbasename, enums,
messages, extensions, options))

sourcedata = ''.join(generate_source(headerbasename, enums,
messages, extensions, options))

return {'headername': headername, 'headerdata': headerdata,
'sourcename': sourcename, 'sourcedata': sourcedata}
def main_cli():
'''Main function when invoked directly from the command line.'''

options, filenames = optparser.parse_args()

if not filenames:
optparser.print_help()
return False
sys.exit(1)

if options.quiet:
options.verbose = False

Globals.verbose_options = options.verbose

toplevel_options = nanopb_pb2.NanoPBOptions()
for s in options.settings:
text_format.Merge(s, toplevel_options)

for filename in filenames:
data = open(filename, 'rb').read()
fdesc = descriptor.FileDescriptorSet.FromString(data)

# Check if any separate options are specified
try:
optfilename = options.options_file % os.path.splitext(filename)[0]
except TypeError:
# No %s specified, use the filename as-is
optfilename = options.options_file

if options.verbose:
print 'Reading options from ' + optfilename

if os.path.isfile(optfilename):
Globals.separate_options = read_options_file(open(optfilename, "rU"))
else:
Globals.separate_options = []

# Parse the file
file_options = get_nanopb_suboptions(fdesc.file[0], toplevel_options, Names([filename]))
enums, messages, extensions = parse_file(fdesc.file[0], file_options)

noext = os.path.splitext(filename)[0]
headername = noext + '.' + options.extension + '.h'
sourcename = noext + '.' + options.extension + '.c'
headerbasename = os.path.basename(headername)
results = process_file(filename, None, options)

if not options.quiet:
print "Writing to " + headername + " and " + sourcename

# List of .proto files that should not be included in the C header file
# even if they are mentioned in the source .proto.
excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
dependencies = [d for d in fdesc.file[0].dependency if d not in excludes]

header = open(headername, 'w')
for part in generate_header(dependencies, headerbasename, enums,
messages, extensions, options):
header.write(part)
print "Writing to " + results['headername'] + " and " + results['sourcename']

open(results['headername'], 'w').write(results['headerdata'])
open(results['sourcename'], 'w').write(results['sourcedata'])

source = open(sourcename, 'w')
for part in generate_source(headerbasename, enums, messages, extensions):
source.write(part)
def main_plugin():
'''Main function when invoked as a protoc plugin.'''

return True
import plugin_pb2
data = sys.stdin.read()
request = plugin_pb2.CodeGeneratorRequest.FromString(data)

import shlex
args = shlex.split(request.parameter)
options, dummy = optparser.parse_args(args)

# We can't go printing stuff to stdout
Globals.verbose_options = False
options.verbose = False
options.quiet = True

response = plugin_pb2.CodeGeneratorResponse()

for filename in request.file_to_generate:
for fdesc in request.proto_file:
if fdesc.name == filename:
results = process_file(filename, fdesc, options)

f = response.file.add()
f.name = results['headername']
f.content = results['headerdata']

f = response.file.add()
f.name = results['sourcename']
f.content = results['sourcedata']

sys.stdout.write(response.SerializeToString())

if __name__ == '__main__':
options, filenames = optparser.parse_args()
status = process(filenames, options)

if not status:
sys.exit(1)

# Check if we are running as a plugin under protoc
if 'protoc-gen-' in sys.argv[0]:
main_plugin()
else:
main_cli()
145  generator/plugin.proto  Normal file
@@ -0,0 +1,145 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Author: kenton@google.com (Kenton Varda)
//
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
// change.
//
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
// just a program that reads a CodeGeneratorRequest from stdin and writes a
// CodeGeneratorResponse to stdout.
//
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
// of dealing with the raw protocol defined here.
//
// A plugin executable needs only to be placed somewhere in the path. The
// plugin should be named "protoc-gen-$NAME", and will then be used when the
// flag "--${NAME}_out" is passed to protoc.

package google.protobuf.compiler;

import "google/protobuf/descriptor.proto";

// An encoded CodeGeneratorRequest is written to the plugin's stdin.
message CodeGeneratorRequest {
// The .proto files that were explicitly listed on the command-line. The
// code generator should generate code only for these files. Each file's
// descriptor will be included in proto_file, below.
repeated string file_to_generate = 1;

// The generator parameter passed on the command-line.
optional string parameter = 2;

// FileDescriptorProtos for all files in files_to_generate and everything
// they import. The files will appear in topological order, so each file
// appears before any file that imports it.
//
// protoc guarantees that all proto_files will be written after
// the fields above, even though this is not technically guaranteed by the
// protobuf wire format. This theoretically could allow a plugin to stream
// in the FileDescriptorProtos and handle them one by one rather than read
// the entire set into memory at once. However, as of this writing, this
// is not similarly optimized on protoc's end -- it will store all fields in
// memory at once before sending them to the plugin.
repeated FileDescriptorProto proto_file = 15;
}

// The plugin writes an encoded CodeGeneratorResponse to stdout.
message CodeGeneratorResponse {
// Error message. If non-empty, code generation failed. The plugin process
// should exit with status code zero even if it reports an error in this way.
//
// This should be used to indicate errors in .proto files which prevent the
// code generator from generating correct code. Errors which indicate a
// problem in protoc itself -- such as the input CodeGeneratorRequest being
// unparseable -- should be reported by writing a message to stderr and
// exiting with a non-zero status code.
optional string error = 1;

// Represents a single generated file.
message File {
// The file name, relative to the output directory. The name must not
// contain "." or ".." components and must be relative, not be absolute (so,
// the file cannot lie outside the output directory). "/" must be used as
// the path separator, not "\".
//
// If the name is omitted, the content will be appended to the previous
// file. This allows the generator to break large files into small chunks,
// and allows the generated text to be streamed back to protoc so that large
// files need not reside completely in memory at one time. Note that as of
// this writing protoc does not optimize for this -- it will read the entire
// CodeGeneratorResponse before writing files to disk.
optional string name = 1;

// If non-empty, indicates that the named file should already exist, and the
// content here is to be inserted into that file at a defined insertion
// point. This feature allows a code generator to extend the output
// produced by another code generator. The original generator may provide
// insertion points by placing special annotations in the file that look
// like:
// @@protoc_insertion_point(NAME)
// The annotation can have arbitrary text before and after it on the line,
// which allows it to be placed in a comment. NAME should be replaced with
// an identifier naming the point -- this is what other generators will use
// as the insertion_point. Code inserted at this point will be placed
// immediately above the line containing the insertion point (thus multiple
// insertions to the same point will come out in the order they were added).
// The double-@ is intended to make it unlikely that the generated code
// could contain things that look like insertion points by accident.
//
// For example, the C++ code generator places the following line in the
// .pb.h files that it generates:
// // @@protoc_insertion_point(namespace_scope)
// This line appears within the scope of the file's package namespace, but
// outside of any particular class. Another plugin can then specify the
// insertion_point "namespace_scope" to generate additional classes or
// other declarations that should be placed in this scope.
//
// Note that if the line containing the insertion point begins with
// whitespace, the same whitespace will be added to every line of the
// inserted text. This is useful for languages like Python, where
// indentation matters. In these languages, the insertion point comment
// should be indented the same amount as any inserted code will need to be
// in order to work correctly in that context.
//
// The code generator that generates the initial file and the one which
// inserts into it must both run as part of a single invocation of protoc.
// Code generators are executed in the order in which they appear on the
// command line.
//
// If |insertion_point| is present, |name| must also be present.
optional string insertion_point = 2;

// The file contents.
optional string content = 15;
}
repeated File file = 15;
}
161  generator/plugin_pb2.py  Normal file
@@ -0,0 +1,161 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!

from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)


import google.protobuf.descriptor_pb2

DESCRIPTOR = descriptor.FileDescriptor(
name='plugin.proto',
package='google.protobuf.compiler',
serialized_pb='\n\x0cplugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"}\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xaa\x01\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a>\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t')



_CODEGENERATORREQUEST = descriptor.Descriptor(
name='CodeGeneratorRequest',
full_name='google.protobuf.compiler.CodeGeneratorRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='file_to_generate', full_name='google.protobuf.compiler.CodeGeneratorRequest.file_to_generate', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='parameter', full_name='google.protobuf.compiler.CodeGeneratorRequest.parameter', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='proto_file', full_name='google.protobuf.compiler.CodeGeneratorRequest.proto_file', index=2,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=76,
serialized_end=201,
)


_CODEGENERATORRESPONSE_FILE = descriptor.Descriptor(
name='File',
full_name='google.protobuf.compiler.CodeGeneratorResponse.File',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='insertion_point', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='content', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.content', index=2,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=312,
serialized_end=374,
)

_CODEGENERATORRESPONSE = descriptor.Descriptor(
name='CodeGeneratorResponse',
full_name='google.protobuf.compiler.CodeGeneratorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='error', full_name='google.protobuf.compiler.CodeGeneratorResponse.error', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='file', full_name='google.protobuf.compiler.CodeGeneratorResponse.file', index=1,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CODEGENERATORRESPONSE_FILE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=204,
serialized_end=374,
)

_CODEGENERATORREQUEST.fields_by_name['proto_file'].message_type = google.protobuf.descriptor_pb2._FILEDESCRIPTORPROTO
_CODEGENERATORRESPONSE_FILE.containing_type = _CODEGENERATORRESPONSE;
_CODEGENERATORRESPONSE.fields_by_name['file'].message_type = _CODEGENERATORRESPONSE_FILE
DESCRIPTOR.message_types_by_name['CodeGeneratorRequest'] = _CODEGENERATORREQUEST
DESCRIPTOR.message_types_by_name['CodeGeneratorResponse'] = _CODEGENERATORRESPONSE

class CodeGeneratorRequest(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CODEGENERATORREQUEST

# @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest)

class CodeGeneratorResponse(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType

class File(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CODEGENERATORRESPONSE_FILE

# @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File)
DESCRIPTOR = _CODEGENERATORRESPONSE

# @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse)

# @@protoc_insertion_point(module_scope)
56  pb.h
@@ -320,58 +320,66 @@ struct _pb_extension_t {
};

/* These macros are used to declare pb_field_t's in the constant array. */
/* Size of a structure member, in bytes. */
#define pb_membersize(st, m) (sizeof ((st*)0)->m)
/* Number of entries in an array. */
#define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0]))
/* Delta from start of one member to the start of another member. */
#define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2))
#define pb_delta_end(st, m1, m2) (int)(offsetof(st, m1) == offsetof(st, m2) \
? offsetof(st, m1) \
: offsetof(st, m1) - offsetof(st, m2) - pb_membersize(st, m2))
/* Delta from start of structure to member. */
#define pb_fielddelta_first(st, m1, m2) (offsetof(st, m1))
/* Delta from end of one field to start of another field. */
#define pb_fielddelta_other(st, m1, m2) (offsetof(st, m1) - offsetof(st, m2) - pb_membersize(st, m2))
/* Choose between pb_fielddelta_first and pb_fielddelta_other (backwards compatibility) */
#define pb_fielddelta_choose(st, m1, m2) (int)(offsetof(st, m1) == offsetof(st, m2) \
? pb_fielddelta_first(st, m1, m2) \
: pb_fielddelta_other(st, m1, m2))
#define PB_LAST_FIELD {0,(pb_type_t) 0,0,0,0,0,0}

/* Required fields are the simplest. They just have delta (padding) from
* previous field end, and the size of the field. Pointer is used for
* submessages and default values.
*/
#define PB_REQUIRED_STATIC(tag, st, m, pm, ltype, ptr) \
#define PB_REQUIRED_STATIC(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_REQUIRED | ltype, \
pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
fd, 0, pb_membersize(st, m), 0, ptr}

/* Optional fields add the delta to the has_ variable. */
#define PB_OPTIONAL_STATIC(tag, st, m, pm, ltype, ptr) \
#define PB_OPTIONAL_STATIC(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_OPTIONAL | ltype, \
pb_delta_end(st, m, pm), \
fd, \
pb_delta(st, has_ ## m, m), \
pb_membersize(st, m), 0, ptr}

/* Repeated fields have a _count field and also the maximum number of entries. */
#define PB_REPEATED_STATIC(tag, st, m, pm, ltype, ptr) \
#define PB_REPEATED_STATIC(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_REPEATED | ltype, \
pb_delta_end(st, m, pm), \
fd, \
pb_delta(st, m ## _count, m), \
pb_membersize(st, m[0]), \
pb_arraysize(st, m), ptr}

/* Callbacks are much like required fields except with special datatype. */
#define PB_REQUIRED_CALLBACK(tag, st, m, pm, ltype, ptr) \
#define PB_REQUIRED_CALLBACK(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_REQUIRED | ltype, \
pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
fd, 0, pb_membersize(st, m), 0, ptr}

#define PB_OPTIONAL_CALLBACK(tag, st, m, pm, ltype, ptr) \
#define PB_OPTIONAL_CALLBACK(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_OPTIONAL | ltype, \
pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
fd, 0, pb_membersize(st, m), 0, ptr}

#define PB_REPEATED_CALLBACK(tag, st, m, pm, ltype, ptr) \
#define PB_REPEATED_CALLBACK(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_REPEATED | ltype, \
pb_delta_end(st, m, pm), 0, pb_membersize(st, m), 0, ptr}
fd, 0, pb_membersize(st, m), 0, ptr}

/* Optional extensions don't have the has_ field, as that would be redundant. */
#define PB_OPTEXT_STATIC(tag, st, m, pm, ltype, ptr) \
#define PB_OPTEXT_STATIC(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_STATIC | PB_HTYPE_OPTIONAL | ltype, \
0, \
0, \
pb_membersize(st, m), 0, ptr}

#define PB_OPTEXT_CALLBACK(tag, st, m, pm, ltype, ptr) \
#define PB_OPTEXT_CALLBACK(tag, st, m, fd, ltype, ptr) \
{tag, PB_ATYPE_CALLBACK | PB_HTYPE_OPTIONAL | ltype, \
0, 0, pb_membersize(st, m), 0, ptr}
@@ -410,8 +418,18 @@ struct _pb_extension_t {
*/

#define PB_FIELD(tag, type, rules, allocation, message, field, prevfield, ptr) \
PB_ ## rules ## _ ## allocation(tag, message, field, prevfield, \
PB_LTYPE_MAP_ ## type, ptr)
PB_ ## rules ## _ ## allocation(tag, message, field, \
pb_fielddelta_choose(message, field, prevfield), \
PB_LTYPE_MAP_ ## type, ptr)

/* This is a new version of the macro used by nanopb generator from
* version 0.2.3 onwards. It avoids the use of a ternary expression in
* the initialization, which confused some compilers.
*/
#define PB_FIELD2(tag, type, rules, allocation, message, field, prevfield, pos, ptr) \
PB_ ## rules ## _ ## allocation(tag, message, field, \
pb_fielddelta_ ## pos(message, field, prevfield), \
PB_LTYPE_MAP_ ## type, ptr)
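Tracing the expansion shows why the ternary disappears. A sketch with a hypothetical one-field message (identifiers invented; the expansion follows the macros above):

```c
#include <pb.h>

/* Hypothetical one-field message, for illustration only. */
typedef struct _Demo { int32_t id; } Demo;

const pb_field_t Demo_fields[2] = {
    /* Expands through PB_REQUIRED_STATIC to
     *   {1, PB_ATYPE_STATIC | PB_HTYPE_REQUIRED | PB_LTYPE_MAP_INT32,
     *    pb_fielddelta_first(Demo, id, id), 0, pb_membersize(Demo, id), 0, 0}
     * i.e. plain offsetof arithmetic, with no ?: left in the constant
     * initializer. The older PB_FIELD(1, INT32, REQUIRED, STATIC, Demo,
     * id, id, 0) form would place pb_fielddelta_choose()'s ternary there. */
    PB_FIELD2(1, INT32, REQUIRED, STATIC, Demo, id, id, first, 0),
    PB_LAST_FIELD
};
```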
/* These macros are used for giving out error messages.
66  pb_decode.h
@@ -12,6 +12,39 @@
extern "C" {
#endif

/* Structure for defining custom input streams. You will need to provide
* a callback function to read the bytes from your storage, which can be
* for example a file or a network socket.
*
* The callback must conform to these rules:
*
* 1) Return false on IO errors. This will cause decoding to abort.
* 2) You can use state to store your own data (e.g. buffer pointer),
* and rely on pb_read to verify that no-body reads past bytes_left.
* 3) Your callback may be used with substreams, in which case bytes_left
* is different than from the main stream. Don't use bytes_left to compute
* any pointers.
*/
struct _pb_istream_t
{
#ifdef PB_BUFFER_ONLY
/* Callback pointer is not used in buffer-only configuration.
* Having an int pointer here allows binary compatibility but
* gives an error if someone tries to assign callback function.
*/
int *callback;
#else
bool (*callback)(pb_istream_t *stream, uint8_t *buf, size_t count);
#endif

void *state; /* Free field for use by callback implementation */
size_t bytes_left;

#ifndef PB_NO_ERRMSG
const char *errmsg;
#endif
};
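The rules above describe the contract for custom input stream callbacks. A minimal sketch of such a callback, assuming the common case of reading from a FILE* and that PB_BUFFER_ONLY is not defined (the helper names here are invented, not part of the nanopb API):

```c
#include <stdio.h>
#include <pb_decode.h>

/* Pull 'count' bytes from the FILE* kept in stream->state.
 * A short read is reported to the decoder as an IO error (return false). */
static bool file_read_callback(pb_istream_t *stream, uint8_t *buf, size_t count)
{
    FILE *f = (FILE *)stream->state;
    return fread(buf, 1, count, f) == count;
}

/* Build an input stream that hands out at most 'msglen' bytes. */
static pb_istream_t istream_from_file(FILE *f, size_t msglen)
{
    pb_istream_t stream;
    stream.callback = &file_read_callback;
    stream.state = f;
    stream.bytes_left = msglen;
#ifndef PB_NO_ERRMSG
    stream.errmsg = NULL;
#endif
    return stream;
}
```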
/***************************
* Main decoding functions *
***************************/
@@ -66,39 +99,6 @@ pb_istream_t pb_istream_from_buffer(uint8_t *buf, size_t bufsize);
*/
bool pb_read(pb_istream_t *stream, uint8_t *buf, size_t count);

/* Structure for defining custom input streams. You will need to provide
* a callback function to read the bytes from your storage, which can be
* for example a file or a network socket.
*
* The callback must conform to these rules:
*
* 1) Return false on IO errors. This will cause decoding to abort.
* 2) You can use state to store your own data (e.g. buffer pointer),
* and rely on pb_read to verify that no-body reads past bytes_left.
* 3) Your callback may be used with substreams, in which case bytes_left
* is different than from the main stream. Don't use bytes_left to compute
* any pointers.
*/
struct _pb_istream_t
{
#ifdef PB_BUFFER_ONLY
/* Callback pointer is not used in buffer-only configuration.
* Having an int pointer here allows binary compatibility but
* gives an error if someone tries to assign callback function.
*/
int *callback;
#else
bool (*callback)(pb_istream_t *stream, uint8_t *buf, size_t count);
#endif

void *state; /* Free field for use by callback implementation */
size_t bytes_left;

#ifndef PB_NO_ERRMSG
const char *errmsg;
#endif
};

/************************************************
* Helper functions for writing field callbacks *
68  pb_encode.h
@@ -12,6 +12,40 @@
extern "C" {
#endif

/* Structure for defining custom output streams. You will need to provide
* a callback function to write the bytes to your storage, which can be
* for example a file or a network socket.
*
* The callback must conform to these rules:
*
* 1) Return false on IO errors. This will cause encoding to abort.
* 2) You can use state to store your own data (e.g. buffer pointer).
* 3) pb_write will update bytes_written after your callback runs.
* 4) Substreams will modify max_size and bytes_written. Don't use them
* to calculate any pointers.
*/
struct _pb_ostream_t
{
#ifdef PB_BUFFER_ONLY
/* Callback pointer is not used in buffer-only configuration.
* Having an int pointer here allows binary compatibility but
* gives an error if someone tries to assign callback function.
* Also, NULL pointer marks a 'sizing stream' that does not
* write anything.
*/
int *callback;
#else
bool (*callback)(pb_ostream_t *stream, const uint8_t *buf, size_t count);
#endif
void *state; /* Free field for use by callback implementation. */
size_t max_size; /* Limit number of output bytes written (or use SIZE_MAX). */
size_t bytes_written; /* Number of bytes written so far. */

#ifndef PB_NO_ERRMSG
const char *errmsg;
#endif
};
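Mirroring the decode side, a minimal sketch of an output stream that writes to a FILE*, again assuming PB_BUFFER_ONLY is not defined (helper names invented for the example):

```c
#include <stdio.h>
#include <pb_encode.h>

/* Push 'count' bytes to the FILE* kept in stream->state;
 * a short write is reported to the encoder as an IO error. */
static bool file_write_callback(pb_ostream_t *stream, const uint8_t *buf, size_t count)
{
    FILE *f = (FILE *)stream->state;
    return fwrite(buf, 1, count, f) == count;
}

static pb_ostream_t ostream_from_file(FILE *f)
{
    pb_ostream_t stream;
    stream.callback = &file_write_callback;
    stream.state = f;
    stream.max_size = SIZE_MAX;    /* no explicit limit on output size */
    stream.bytes_written = 0;
#ifndef PB_NO_ERRMSG
    stream.errmsg = NULL;
#endif
    return stream;
}
```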
/***************************
* Main encoding functions *
***************************/
@@ -70,40 +104,6 @@ pb_ostream_t pb_ostream_from_buffer(uint8_t *buf, size_t bufsize);
*/
bool pb_write(pb_ostream_t *stream, const uint8_t *buf, size_t count);

/* Structure for defining custom output streams. You will need to provide
* a callback function to write the bytes to your storage, which can be
* for example a file or a network socket.
*
* The callback must conform to these rules:
*
* 1) Return false on IO errors. This will cause encoding to abort.
* 2) You can use state to store your own data (e.g. buffer pointer).
* 3) pb_write will update bytes_written after your callback runs.
* 4) Substreams will modify max_size and bytes_written. Don't use them
* to calculate any pointers.
*/
struct _pb_ostream_t
{
#ifdef PB_BUFFER_ONLY
/* Callback pointer is not used in buffer-only configuration.
* Having an int pointer here allows binary compatibility but
* gives an error if someone tries to assign callback function.
* Also, NULL pointer marks a 'sizing stream' that does not
* write anything.
*/
int *callback;
#else
bool (*callback)(pb_ostream_t *stream, const uint8_t *buf, size_t count);
#endif
void *state; /* Free field for use by callback implementation. */
size_t max_size; /* Limit number of output bytes written (or use SIZE_MAX). */
size_t bytes_written; /* Number of bytes written so far. */

#ifndef PB_NO_ERRMSG
const char *errmsg;
#endif
};

/************************************************
* Helper functions for writing field callbacks *