diff --git a/erizo/.gitignore b/erizo/.gitignore new file mode 100644 index 0000000000..378eac25d3 --- /dev/null +++ b/erizo/.gitignore @@ -0,0 +1 @@ +build diff --git a/erizo/README.md b/erizo/README.md new file mode 100644 index 0000000000..a3af07ede8 --- /dev/null +++ b/erizo/README.md @@ -0,0 +1,53 @@ +# Erizo, a C/C++ Multipoint Control Unit (MCU) Library for WebRTC +*Warning: This is the 'media' branch; it is highly unstable* + +Erizo is a project that aims to implement a library capable of communicating with WebRTC (http://www.webrtc.org) browser clients in order to provide advanced communication services. It is currently tested on Ubuntu 11.10 and above, but it should also compile on other distributions. + +Updated code documentation can be found at http://ging.github.com/erizo + +## Directory structure + +- /src - The root source directory +- /src/erizo - The source of the main library +- /src/examples - Examples and tests + +## Requirements + +- CMake >= 2.8 +- libSRTP version >= 1.4.4 +- Libnice version >= 1.10 +- boost_threads >= 1.48 +- boost_regex >= 1.48 (optional, only for examples) +- boost_asio >= 1.48 (optional, only for examples) +- boost_system >= 1.48 (optional, only for examples) + +## Building Instructions + +This project is built using CMake. + +The easiest way to build it is to use the provided scripts: +- Run ./generateProject.sh to run cmake, test the dependencies and generate the Makefile. +- Run ./buildProject.sh to build the project after generating the Makefile. It simply runs make in the build directory. +- Run ./generateEclipseProject.sh to generate an Eclipse CDT project which can be imported and used to work with the code. + +If Doxygen is available, a "doc" target is generated. HTML documentation can be built by running "make doc" in the build directory. + +## Examples + +As of now, the only application built using the library is a streaming application that connects via TCP to a server application built on top of node.js. + +The node.js app is not released, but the code should provide an example of how the SDP exchange is performed. +The examples are currently unusable, but they can help in understanding the API. +In the future, we will include tests and updated examples here. + +## License + +The MIT License + +Copyright (C) 2012 Universidad Politecnica de Madrid. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
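For readers who prefer not to use the wrapper scripts, the build described in the README is a standard out-of-source CMake build. A minimal sketch of the equivalent manual steps, assuming the commands are run from the erizo/ directory and that Doxygen is installed for the optional documentation target:

```bash
# Manual equivalent of generateProject.sh followed by buildProject.sh
mkdir -p build        # out-of-source build directory (ignored via .gitignore)
cd build
cmake ../src          # checks dependencies and generates the Makefile
make                  # builds the erizo library
make doc              # optional: only present if CMake found Doxygen
```

generateEclipseProject.sh differs only in the generator it asks CMake for (cmake -G"Eclipse CDT4 - Unix Makefiles" -D CMAKE_BUILD_TYPE=Debug ../src), which produces an importable Eclipse CDT project alongside the Makefiles.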
diff --git a/erizo/buildProject.sh b/erizo/buildProject.sh new file mode 100755 index 0000000000..5a2daf33e0 --- /dev/null +++ b/erizo/buildProject.sh @@ -0,0 +1,13 @@ +#!/bin/bash +runcmake() { + cmake ../src + echo "Done" +} +BIN_DIR="build" +if [ -d $BIN_DIR ]; then + cd $BIN_DIR + make +else + echo "Error, build directory does not exist, run generateProject.sh first" +fi + diff --git a/erizo/generateEclipseProject.sh b/erizo/generateEclipseProject.sh new file mode 100755 index 0000000000..14bb5a242e --- /dev/null +++ b/erizo/generateEclipseProject.sh @@ -0,0 +1,12 @@ +#!/bin/bash +BIN_DIR="build" +if [ -d $BIN_DIR ]; then + cd $BIN_DIR + # Set to Debug to be able to debug in Eclipse + cmake -G"Eclipse CDT4 - Unix Makefiles" -D CMAKE_BUILD_TYPE=Debug ../src + echo "Done" + cd .. +else + echo "Error, build directory does not exist, run generateProject.sh first" +fi + diff --git a/erizo/generateProject.sh b/erizo/generateProject.sh new file mode 100755 index 0000000000..134ddc0dfb --- /dev/null +++ b/erizo/generateProject.sh @@ -0,0 +1,15 @@ +#!/bin/bash +runcmake() { + cmake ../src + echo "Done" +} +BIN_DIR="build" +if [ -d $BIN_DIR ]; then + cd $BIN_DIR + runcmake +else + mkdir $BIN_DIR + cd $BIN_DIR + runcmake +fi + diff --git a/erizo/src/CMakeLists.txt b/erizo/src/CMakeLists.txt new file mode 100644 index 0000000000..801c06c6f7 --- /dev/null +++ b/erizo/src/CMakeLists.txt @@ -0,0 +1,20 @@ +cmake_minimum_required(VERSION 2.8) + +project (ERIZO_ALL) +set (COMPILE_EXAMPLES OFF) +add_subdirectory(${ERIZO_ALL_SOURCES}erizo) +find_package (Doxygen) +if(DOXYGEN_FOUND) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) + add_custom_target(doc + ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating API documentation with Doxygen" VERBATIM + ) +endif(DOXYGEN_FOUND) + +if(COMPILE_EXAMPLES) + include_directories(${ERIZO_ALL_SOURCES}erizo) + set (EXTRA_LIBS ${EXTRA_LIBS} erizo) + add_subdirectory(${ERIZO_ALL_SOURCES}examples) +endif(COMPILE_EXAMPLES) diff --git a/erizo/src/Doxyfile.in b/erizo/src/Doxyfile.in new file mode 100644 index 0000000000..24cabad707 --- /dev/null +++ b/erizo/src/Doxyfile.in @@ -0,0 +1,1716 @@ +# Doxyfile 1.7.4 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = ERIZO + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = docs + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. 
+# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = NO + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. 
If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this +# tag. The format is ext=language, where ext is a file extension, and language +# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, +# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions +# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. 
Setting this option to YES (the default) +# will make doxygen replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penalty. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will roughly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols + +SYMBOL_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. 
+ +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. 
+ +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. 
If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. + +SHOW_DIRECTORIES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. The create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. 
+ +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = @CMAKE_CURRENT_SOURCE_DIR@/erizo + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. 
The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# non of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. + +FILTER_SOURCE_PATTERNS = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. 
Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. 
+# It is adviced to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the stylesheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). 
+ +HTML_DYNAMIC_SECTIONS = YES + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. 
+ +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values +# (range [0,1..20]) that doxygen will group on one line in the generated HTML +# documentation. Note that a value of 0 will completely suppress the enum +# values from appearing in the overview section. + +ENUM_VALUES_PER_LINE = 4 + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. + +GENERATE_TREEVIEW = YES + +# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, +# and Class Hierarchy pages using a tree view instead of an ordered list. 
+ +USE_INLINE_TREES = YES + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = NO + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the +# mathjax.org site, so you can quickly see the result without installing +# MathJax, but it is strongly recommended to install a local copy of MathJax +# before deployment. + +MATHJAX_RELPATH = http://www.mathjax.org/mathjax + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a PHP enabled web server instead of at the web client +# using Javascript. Doxygen will generate the search PHP script and index +# file to put on the web server. The advantage of the server +# based approach is that it scales better to large projects and allows +# full text search. The disadvantages are that it is more difficult to setup +# and does not have live searching capabilities. 
+ +SERVER_BASED_SEARCH = NO + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4 + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for +# the generated latex document. The footer should contain everything after +# the last chapter. If it is left blank doxygen will generate a +# standard footer. Notice: only use this tag if you know what you are doing! + +LATEX_FOOTER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. 
+# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. 
+ +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. 
+ +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that +# overrules the definition found in the source code. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). 
+ +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will write a font called Helvetica to the output +# directory and reference it in all dot files that doxygen generates. +# When you want a differently looking font you can specify the font name +# using DOT_FONTNAME. You need to make sure dot is able to find the font, +# which can be done by putting it in a standard location or by setting the +# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory +# containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the output directory to look for the +# FreeSans.ttf font (which doxygen will put there itself). If you specify a +# different font using DOT_FONTNAME you can set the path where dot +# can find it using this tag. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. 
+ +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are svg, png, jpg, or gif. +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. 
Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. 
+ +DOT_CLEANUP = YES diff --git a/erizo/src/erizo/CMakeLists.txt b/erizo/src/erizo/CMakeLists.txt new file mode 100644 index 0000000000..b0d0a88450 --- /dev/null +++ b/erizo/src/erizo/CMakeLists.txt @@ -0,0 +1,41 @@ +cmake_minimum_required(VERSION 2.8) +#functions +function(test_lib LIB_NAME) + if (${LIB_NAME} MATCHES "^.*-NOTFOUND") + message(FATAL_ERROR "lib not found: " ${LIB_NAME} " check README") + return() + endif(${LIB_NAME} MATCHES "^.*-NOTFOUND") +endfunction(test_lib) +project (ERIZO) + +set(ERIZO_VERSION_MAJOR 0) +set(ERIZO_VERSION_MINOR 1) + +file(GLOB_RECURSE ERIZO_SOURCES ${ERIZO_SOURCE_DIR}/*.cpp ${ERIZO_SOURCE_DIR}/*.h) +add_library(erizo SHARED ${ERIZO_SOURCES}) +find_package(GTK2 REQUIRED gtk) +# GTK2 +if (GTK2_FOUND) + include_directories(${GTK2_INCLUDE_DIRS}) + target_link_libraries(erizo ${GTK2_LIBRARIES}) +endif(GTK2_FOUND) +# BOOST +set (BOOST_LIBS thread regex system) +find_package(Boost COMPONENTS ${BOOST_LIBS} REQUIRED) +target_link_libraries(erizo ${Boost_LIBRARIES}) + +#THE REST +find_library(SRTP srtp) +test_lib(${SRTP}) +set (LIBS ${LIBS} ${SRTP}) +find_library(NICE nice) +test_lib(${NICE}) +set (LIBS ${LIBS} ${NICE}) +find_library(GTHREAD gthread-2.0) +test_lib(${GTHREAD}) +set (LIBS ${LIBS} ${GTHREAD}) +find_library(AVCODEC avcodec) +set (LIBS ${LIBS} ${AVCODEC}) +find_library(AVFORMAT avformat) +set (LIBS ${LIBS} ${AVFORMAT}) +target_link_libraries(erizo ${LIBS}) diff --git a/erizo/src/erizo/MediaDefinitions.h b/erizo/src/erizo/MediaDefinitions.h new file mode 100644 index 0000000000..3e5a875f2f --- /dev/null +++ b/erizo/src/erizo/MediaDefinitions.h @@ -0,0 +1,37 @@ +/* + * mediadefinitions.h + */ + +#ifndef MEDIADEFINITIONS_H_ +#define MEDIADEFINITIONS_H_ + +namespace erizo{ + +class NiceConnection; + +struct packet{ + char data[1200]; + int length; +}; + +/** + * A MediaReceiver is any class that can receive audio or video data. + */ +class MediaReceiver{ +public: + virtual int receiveAudioData(char* buf, int len)=0; + virtual int receiveVideoData(char* buf, int len)=0; + virtual ~MediaReceiver(){}; +}; +/** + * A NiceReceiver is any class that can receive data from a nice connection. + */ +class NiceReceiver{ +public: + virtual int receiveNiceData(char* buf, int len, NiceConnection* nice)=0; + virtual ~NiceReceiver(){}; +}; + +} /* namespace erizo */ + +#endif /* MEDIADEFINITIONS_H_ */ diff --git a/erizo/src/erizo/NiceConnection.cpp b/erizo/src/erizo/NiceConnection.cpp new file mode 100644 index 0000000000..1f9b1b0612 --- /dev/null +++ b/erizo/src/erizo/NiceConnection.cpp @@ -0,0 +1,313 @@ +/* + * NiceConnection.cpp + */ + +#include +#include + +#include "NiceConnection.h" +#include "SdpInfo.h" + +namespace erizo { + + guint stream_id; + GSList* lcands; + int streamsGathered; + int rec, sen; + int length; + int components = 2; + uint32_t ssrc = 55543; + + void cb_nice_recv(NiceAgent* agent, guint stream_id, guint component_id, + guint len, gchar* buf, gpointer user_data) { + + // printf( "cb_nice_recv len %u id %u\n",len, stream_id ); + NiceConnection* nicecon = (NiceConnection*) user_data; + nicecon->getWebRtcConnection()->receiveNiceData((char*) buf, (int) len, + (NiceConnection*) user_data); + } + + void cb_candidate_gathering_done(NiceAgent *agent, guint stream_id, + gpointer user_data) { + + NiceConnection *conn = (NiceConnection*) user_data; + //printf("ConnState %u\n",conn->state); + // ... Wait until the signal candidate-gathering-done is fired ... 
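+    // The loop below asks libnice for the local candidates of each component
+    // of this stream (nice_agent_get_local_candidates is called once per
+    // component id) and converts every NiceCandidate into an
+    // erizo::CandidateInfo: IPv6 addresses are skipped, the candidate type is
+    // mapped to HOST/SRLFX/PRFLX/RELAY and the result is appended to
+    // conn->localCandidates. Once all components have been processed the
+    // connection is moved to the CANDIDATES_GATHERED state.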
+ int currentCompId = 1; + lcands = nice_agent_get_local_candidates(agent, stream_id, currentCompId++); + NiceCandidate *cand; + GSList* iterator; + // printf("gathering done %u\n",stream_id); + //printf("Candidates---------------------------------------------------->\n"); + while (lcands != NULL) { + for (iterator = lcands; iterator; iterator = iterator->next) { + char address[40]; + cand = (NiceCandidate*) iterator->data; + nice_address_to_string(&cand->addr, address); + if (strstr(address, ":") != NULL) { + printf("Ignoring IPV6 candidate\n"); + continue; + + } + // printf("foundation %s\n", cand->foundation); + // printf("compid %u\n", cand->component_id); + // printf("stream_id %u\n", cand->stream_id); + // printf("priority %u\n", cand->priority); + // printf("username %s\n", cand->username); + // printf("password %s\n", cand->password); + CandidateInfo cand_info; + cand_info.componentId = cand->component_id; + cand_info.foundation = cand->foundation; + cand_info.priority = cand->priority; + cand_info.hostAddress = std::string(address); + cand_info.hostPort = nice_address_get_port(&cand->addr); + cand_info.mediaType = conn->mediaType; + + /* + * NICE_CANDIDATE_TYPE_HOST, + * NICE_CANDIDATE_TYPE_SERVER_REFLEXIVE, + * NICE_CANDIDATE_TYPE_PEER_REFLEXIVE, + * NICE_CANDIDATE_TYPE_RELAYED, + */ + switch (cand->type) { + case NICE_CANDIDATE_TYPE_HOST: + cand_info.hostType = HOST; + break; + case NICE_CANDIDATE_TYPE_SERVER_REFLEXIVE: + cand_info.hostType = SRLFX; + break; + case NICE_CANDIDATE_TYPE_PEER_REFLEXIVE: + cand_info.hostType = PRFLX; + break; + case NICE_CANDIDATE_TYPE_RELAYED: + printf("WARNING TURN NOT IMPLEMENTED YET\n"); + cand_info.hostType = RELAY; + break; + default: + break; + } + cand_info.netProtocol = "udp"; + cand_info.transProtocol = std::string(*conn->transportName); + //cand_info.username = std::string(cand->username); + if (cand->username) + cand_info.username = std::string(cand->username); + else + cand_info.username = std::string("(null)"); + + if (cand->password) + cand_info.password = std::string(cand->password); + else + cand_info.password = std::string("(null)"); + + conn->localCandidates->push_back(cand_info); + } + lcands = nice_agent_get_local_candidates(agent, stream_id, + currentCompId++); + } + printf("candidate_gathering done, size %u\n", + conn->localCandidates->size()); + conn->updateIceState(CANDIDATES_GATHERED); + } + + void cb_component_state_changed(NiceAgent *agent, guint stream_id, + guint component_id, guint state, gpointer user_data) { + printf("cb_component_state_changed %u\n", state); + if (state == NICE_COMPONENT_STATE_READY) { + NiceConnection *conn = (NiceConnection*) user_data; + conn->updateIceState(READY); + } else if (state == NICE_COMPONENT_STATE_FAILED) { + printf("Ice Component failed, stopping\n"); + NiceConnection *conn = (NiceConnection*) user_data; + conn->updateIceState(FAILED); + //conn->getWebRtcConnection()->close(); + } + + } + + void cb_new_selected_pair(NiceAgent *agent, guint stream_id, guint component_id, + gchar *lfoundation, gchar *rfoundation, gpointer user_data) { + printf( + "cb_new_selected_pair for stream %u, comp %u, lfound %s, rfound %s \n", + stream_id, component_id, lfoundation, rfoundation); + NiceConnection *conn = (NiceConnection*) user_data; + + printf( + "cb_new_selected_pair for stream %u, comp %u, lfound %s, rfound %s \n", + stream_id, component_id, lfoundation, rfoundation); + } + + NiceConnection::NiceConnection(MediaType med, + const std::string &transport_name) { + + agent_ = NULL; + loop_ = NULL; 
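+        // The constructor only initializes members: the libnice agent and the
+        // GLib main loop are created later in init(), which runs on the thread
+        // launched by start(). conn_ is the WebRtcConnection that receives
+        // incoming data and is expected to be set through setWebRtcConnection()
+        // before any data arrives, since cb_nice_recv dereferences it without a
+        // NULL check.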
+ conn_ = NULL; + mediaType = med; + localCandidates = new std::vector(); + transportName = new std::string(transport_name); + } + + NiceConnection::~NiceConnection() { + + if (iceState != FINISHED) + this->close(); + if (agent_) + g_object_unref(agent_); + if (localCandidates) + delete localCandidates; + if (transportName) + delete transportName; + } + + void NiceConnection::join() { + + m_Thread_.join(); + } + + void NiceConnection::start() { + + m_Thread_ = boost::thread(&NiceConnection::init, this); + } + + void NiceConnection::close() { + + if (agent_ != NULL) + nice_agent_remove_stream(agent_, 1); + if (loop_ != NULL) + g_main_loop_quit(loop_); + iceState = FINISHED; + } + + int NiceConnection::sendData(void *buf, int len) { + + int val = -1; + if (iceState == READY) { + val = nice_agent_send(agent_, 1, 1, len, (char*) buf); + } + return val; + } + + WebRtcConnection* NiceConnection::getWebRtcConnection() { + + return conn_; + } + + void NiceConnection::init() { + + streamsGathered = 0; + this->updateIceState(INITIAL); + + g_type_init(); + g_thread_init(NULL); + + loop_ = g_main_loop_new(NULL, FALSE); + // nice_debug_enable( TRUE ); + // Create a nice agent + agent_ = nice_agent_new(g_main_loop_get_context(loop_), + NICE_COMPATIBILITY_GOOGLE); + + // NiceAddress* naddr = nice_address_new(); + // nice_agent_add_local_address(agent_, naddr); + + GValue val = { 0 }, val2 = { 0 }; + + // g_value_init(&val, G_TYPE_STRING); + // g_value_set_string(&val, "173.194.70.126"); + // g_object_set_property(G_OBJECT( agent_ ), "stun-server", &val); + // + // g_value_init(&val2, G_TYPE_UINT); + // g_value_set_uint(&val2, 19302); + // g_object_set_property(G_OBJECT( agent_ ), "stun-server-port", &val2); + + // Connect the signals + g_signal_connect( G_OBJECT( agent_ ), "candidate-gathering-done", + G_CALLBACK( cb_candidate_gathering_done ), this); + g_signal_connect( G_OBJECT( agent_ ), "component-state-changed", + G_CALLBACK( cb_component_state_changed ), this); + g_signal_connect( G_OBJECT( agent_ ), "new-selected-pair", + G_CALLBACK( cb_new_selected_pair ), this); + + // Create a new stream and start gathering candidates + + int res = nice_agent_add_stream(agent_, 1); + // Set Port Range ----> If this doesn't work when linking the file libnice.sym has to be modified to include this call + // nice_agent_set_port_range(agent_, (guint)1, (guint)1, (guint)51000, (guint)52000); + + nice_agent_gather_candidates(agent_, 1); + nice_agent_attach_recv(agent_, 1, 1, g_main_loop_get_context(loop_), + cb_nice_recv, this); + + // Attach to the component to receive the data + g_main_loop_run(loop_); + } + + bool NiceConnection::setRemoteCandidates( + std::vector &candidates) { + + GSList* candList = NULL; + + for (unsigned int it = 0; it < candidates.size(); it++) { + NiceCandidateType nice_cand_type; + CandidateInfo cinfo = candidates[it]; + if (cinfo.mediaType != this->mediaType + || this->transportName->compare(cinfo.transProtocol)) + continue; + + switch (cinfo.hostType) { + case HOST: + nice_cand_type = NICE_CANDIDATE_TYPE_HOST; + break; + case SRLFX: + nice_cand_type = NICE_CANDIDATE_TYPE_SERVER_REFLEXIVE; + break; + case PRFLX: + nice_cand_type = NICE_CANDIDATE_TYPE_PEER_REFLEXIVE; + break; + case RELAY: + nice_cand_type = NICE_CANDIDATE_TYPE_RELAYED; + break; + default: + nice_cand_type = NICE_CANDIDATE_TYPE_HOST; + break; + } + + NiceCandidate* thecandidate = nice_candidate_new(nice_cand_type); + NiceAddress* naddr = nice_address_new(); + nice_address_set_from_string(naddr, cinfo.hostAddress.c_str()); + 
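+            // The ICE credentials are copied below into heap-allocated C strings
+            // that are assigned to the NiceCandidate. Note that
+            // malloc(cinfo.username.size()) (and the same for the password)
+            // leaves no room for the terminating NUL written by sprintf("%s", ...);
+            // the allocations should probably be size() + 1, or strdup() could be
+            // used instead.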
nice_address_set_port(naddr, cinfo.hostPort); + thecandidate->addr = *naddr; + char* uname = (char*) malloc(cinfo.username.size()); + char* pass = (char*) malloc(cinfo.password.size()); + sprintf(thecandidate->foundation, "%s", cinfo.foundation.c_str()); + sprintf(uname, "%s", cinfo.username.c_str()); + sprintf(pass, "%s", cinfo.password.c_str()); + + thecandidate->username = uname; + thecandidate->password = pass; + thecandidate->stream_id = (guint) 1; + thecandidate->component_id = cinfo.componentId; + thecandidate->priority = cinfo.priority; + thecandidate->transport = NICE_CANDIDATE_TRANSPORT_UDP; + candList = g_slist_append(candList, thecandidate); + + } + + nice_agent_set_remote_candidates(agent_, (guint) 1, 1, candList); + + printf("Candidates SET\n"); + this->updateIceState(CANDIDATES_RECEIVED); + return true; + } + + void NiceConnection::setWebRtcConnection(WebRtcConnection* connection) { + + this->conn_ = connection; + } + + void NiceConnection::updateIceState(IceState state) { + this->iceState = state; + if (this->conn_ != NULL) + this->conn_->updateState(state, this); + } + +} /* namespace erizo */ diff --git a/erizo/src/erizo/NiceConnection.h b/erizo/src/erizo/NiceConnection.h new file mode 100644 index 0000000000..2edd5f39ca --- /dev/null +++ b/erizo/src/erizo/NiceConnection.h @@ -0,0 +1,104 @@ +/* + * NiceConnection.h + */ + +#ifndef NICECONNECTION_H_ +#define NICECONNECTION_H_ + +#include +#include +#include + +#include "MediaDefinitions.h" +#include "SdpInfo.h" +#include "WebRtcConnection.h" + +typedef struct _NiceAgent NiceAgent; +typedef struct _GMainLoop GMainLoop; + +namespace erizo { +//forward declarations +struct CandidateInfo; +class WebRtcConnection; +/** + * An ICE connection via libNice + * Represents an ICE Connection in an new thread. + * + */ +class NiceConnection { +public: + + /** + * Constructs a new NiceConnection. + * @param med The MediaType of the connection. + * @param transportName The name of the transport protocol. Was used when WebRTC used video_rtp instead of just rtp. + */ + NiceConnection(MediaType med, const std::string &transportName); + virtual ~NiceConnection(); + /** + * Join to the internal thread of the NiceConnection. + */ + void join(); + /** + * Starts Gathering candidates in a new thread. + */ + void start(); + /** + * Closes the connection. It renders the object unusable. + */ + void close(); + /** + * Obtains the associated WebRtcConnection. + * @return A pointer to the WebRtcConnection. + */ + WebRtcConnection* getWebRtcConnection(); + /** + * Sets the remote ICE Candidates. + * @param candidates A vector containing the CandidateInfo. + * @return true if successfull. + */ + bool setRemoteCandidates(std::vector &candidates); + /** + * Sets the associated WebRTCConnection. + * @param connection Pointer to the WebRtcConenction. + */ + void setWebRtcConnection(WebRtcConnection *connection); + /** + * Sends data via the ICE Connection. + * @param buf Pointer to the data buffer. + * @param len Length of the Buffer. + * @return Bytes sent. + */ + int sendData(void* buf, int len); + + /** + * The MediaType of the connection + */ + MediaType mediaType; + /** + * The transport name + */ + std::string *transportName; + /** + * The state of the ice Connection + */ + IceState iceState; + /** + * The Obtained local candidates. 
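+	 * (a std::vector of CandidateInfo, allocated in the constructor and
+	 * filled in by the candidate-gathering callback once gathering completes)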
+ */ + std::vector* localCandidates; + + void updateIceState(IceState state); + + +private: + void init(); + NiceAgent* agent_; + WebRtcConnection* conn_; + GMainLoop* loop_; + boost::thread m_Thread_; + +}; + +} /* namespace erizo */ +#endif /* NICECONNECTION_H_ */ diff --git a/erizo/src/erizo/OneToManyProcessor.cpp b/erizo/src/erizo/OneToManyProcessor.cpp new file mode 100644 index 0000000000..a1a7187c0d --- /dev/null +++ b/erizo/src/erizo/OneToManyProcessor.cpp @@ -0,0 +1,86 @@ +/* + * OneToManyProcessor.cpp + */ + +#include "OneToManyProcessor.h" +#include "WebRtcConnection.h" + +namespace erizo { +OneToManyProcessor::OneToManyProcessor() : + MediaReceiver() { + + sendVideoBuffer_ = (char*) malloc(2000); + sendAudioBuffer_ = (char*) malloc(2000); + publisher = NULL; + sentPackets_ = 0; + +} + +OneToManyProcessor::~OneToManyProcessor() { + this->closeAll(); + if (sendVideoBuffer_) + delete sendVideoBuffer_; + if (sendAudioBuffer_) + delete sendAudioBuffer_; +} + +int OneToManyProcessor::receiveAudioData(char* buf, int len) { + + if (subscribers.empty() || len <= 0) + return 0; + + std::map::iterator it; + for (it = subscribers.begin(); it != subscribers.end(); it++) { + memset(sendAudioBuffer_, 0, len); + memcpy(sendAudioBuffer_, buf, len); + (*it).second->receiveAudioData(sendAudioBuffer_, len); + } + + return 0; +} + +int OneToManyProcessor::receiveVideoData(char* buf, int len) { + if (subscribers.empty() || len <= 0) + return 0; + if (sentPackets_ % 500 == 0) { + publisher->sendFirPacket(); + } + std::map::iterator it; + for (it = subscribers.begin(); it != subscribers.end(); it++) { + memset(sendVideoBuffer_, 0, len); + memcpy(sendVideoBuffer_, buf, len); + (*it).second->receiveVideoData(sendVideoBuffer_, len); + } + sentPackets_++; + return 0; +} + +void OneToManyProcessor::setPublisher(WebRtcConnection* webRtcConn) { + + this->publisher = webRtcConn; +} + +void OneToManyProcessor::addSubscriber(WebRtcConnection* webRtcConn, + int peerId) { + + this->subscribers[peerId] = webRtcConn; +} + +void OneToManyProcessor::removeSubscriber(int peerId) { + + if (this->subscribers.find(peerId) != subscribers.end()) { + this->subscribers[peerId]->close(); + this->subscribers.erase(peerId); + } +} + +void OneToManyProcessor::closeAll() { + std::map::iterator it; + for (it = subscribers.begin(); it != subscribers.end(); it++) { + (*it).second->close(); + } + this->publisher->close(); +} + +}/* namespace erizo */ + diff --git a/erizo/src/erizo/OneToManyProcessor.h b/erizo/src/erizo/OneToManyProcessor.h new file mode 100644 index 0000000000..5f150addd3 --- /dev/null +++ b/erizo/src/erizo/OneToManyProcessor.h @@ -0,0 +1,57 @@ +/* + * OneToManyProcessor.h + */ + +#ifndef ONETOMANYPROCESSOR_H_ +#define ONETOMANYPROCESSOR_H_ + +#include + +#include "MediaDefinitions.h" + +namespace erizo{ + +class WebRtcConnection; + +/** + * Represents a One to Many connection. + * Receives media from one publisher and retransmits it to every subscriber. 
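+ * Illustrative wiring (editor's sketch, not part of this patch; the
+ * publisher-side hook-up that feeds receiveAudioData()/receiveVideoData()
+ * with decrypted RTP is not shown in this excerpt, and the connection names
+ * are placeholders):
+ * @code
+ *   OneToManyProcessor muxer;
+ *   muxer.setPublisher(publisherConn);        // WebRtcConnection* acting as source
+ *   muxer.addSubscriber(subscriberConn, 1);   // peerId chosen by the application
+ *   // ... media flows in through receiveAudioData()/receiveVideoData() ...
+ *   muxer.removeSubscriber(1);                // closes and forgets that subscriber
+ *   muxer.closeAll();                         // closes publisher and all subscribers
+ * @endcode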
+ */ +class OneToManyProcessor : public MediaReceiver { +public: + WebRtcConnection *publisher; + std::map subscribers; + + OneToManyProcessor(); + virtual ~OneToManyProcessor(); + /** + * Sets the Publisher + * @param webRtcConn The WebRtcConnection of the Publisher + */ + void setPublisher(WebRtcConnection* webRtcConn); + /** + * Sets the subscriber + * @param webRtcConn The WebRtcConnection of the subscriber + * @param peerId An unique Id for the subscriber + */ + void addSubscriber(WebRtcConnection* webRtcConn, int peerId); + /** + * Eliminates the subscriber given its peer id + * @param peerId the peerId + */ + void removeSubscriber(int peerId); + int receiveAudioData(char* buf, int len); + int receiveVideoData(char* buf, int len); + /** + * Closes all the subscribers and the publisher, the object is useless after this + */ + void closeAll(); + +private: + char* sendVideoBuffer_; + char* sendAudioBuffer_; + unsigned int sentPackets_; +}; + +} /* namespace erizo */ +#endif /* ONETOMANYPROCESSOR_H_ */ diff --git a/erizo/src/erizo/RTPSink.cpp b/erizo/src/erizo/RTPSink.cpp new file mode 100644 index 0000000000..8ef45661ed --- /dev/null +++ b/erizo/src/erizo/RTPSink.cpp @@ -0,0 +1,37 @@ +/* + * RTPSink.cpp + * + * Created on: Aug 2, 2012 + * Author: pedro + */ + +#include "RTPSink.h" +using boost::asio::ip::udp; + +namespace erizo { + +RTPSink::RTPSink(const std::string& url, const std::string& port) { + ioservice_ = new boost::asio::io_service; + resolver_ = new udp::resolver(*ioservice_); + socket_ = new udp::socket(*ioservice_, udp::endpoint(udp::v4(), 40000)); + query_ = new udp::resolver::query(udp::v4(), url.c_str(), port.c_str()); + iterator_ = resolver_->resolve(*query_); +} + +RTPSink::~RTPSink() { + + free(ioservice_); + free(resolver_); + free(socket_); + free(query_); + +} + +int RTPSink::sendData(unsigned char* buffer, int len) { +// printf("sending %u bytes\n", len); + socket_->send_to(boost::asio::buffer(buffer, len), *iterator_); + + return len; +} + +} /* namespace erizo */ diff --git a/erizo/src/erizo/RTPSink.h b/erizo/src/erizo/RTPSink.h new file mode 100644 index 0000000000..f4007ca268 --- /dev/null +++ b/erizo/src/erizo/RTPSink.h @@ -0,0 +1,34 @@ +/* + * RTPSink.h + * + * Created on: Aug 2, 2012 + * Author: pedro + */ + +#ifndef RTPSINK_H_ +#define RTPSINK_H_ + +#include + + +namespace erizo { + +class RTPSink { +public: + RTPSink(const std::string& url, const std::string& port); + int sendData(unsigned char* buffer, int len); + virtual ~RTPSink(); + +private: + + boost::asio::ip::udp::socket* socket_; + boost::asio::ip::udp::resolver* resolver_; + + boost::asio::ip::udp::resolver::query* query_; + boost::asio::io_service* ioservice_; + boost::asio::ip::udp::resolver::iterator iterator_; +}; + + +} /* namespace erizo */ +#endif /* RTPSINK_H_ */ diff --git a/erizo/src/erizo/SdpInfo.cpp b/erizo/src/erizo/SdpInfo.cpp new file mode 100644 index 0000000000..13a56d1eb0 --- /dev/null +++ b/erizo/src/erizo/SdpInfo.cpp @@ -0,0 +1,369 @@ +/* + * SDPProcessor.cpp + */ + +#include +#include +#include +#include + +#include "SdpInfo.h" + +using std::endl; +namespace erizo { + +SdpInfo::SdpInfo() { +} + +SdpInfo::~SdpInfo() { +} + +bool SdpInfo::initWithSdp(const std::string& sdp) { + processSdp(sdp); + return true; +} +void SdpInfo::addCandidate(const CandidateInfo& info) { + candidateVector_.push_back(info); + +} + +void SdpInfo::addCrypto(const CryptoInfo& info) { + cryptoVector_.push_back(info); +} + +std::string SdpInfo::getSdp() { + printf("1\n"); + std::ostringstream sdp; + 
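+    // The SDP answer is assembled from the gathered state: session header, an
+    // optional "a=group:BUNDLE audio video" line, then one section per media
+    // type with the m=/c=/a=rtcp lines taken from the first matching
+    // candidate, one a=candidate line per gathered candidate, the ICE
+    // ufrag/pwd, the a=crypto entries from cryptoVector_, the rtpmap lines and
+    // the a=ssrc lines. The numbered printf("1".."4") calls are debug traces
+    // marking the stages of this assembly.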
sdp << "v=0\n" << "o=- 0 0 IN IP4 127.0.0.1\n" << "s=\n" << "t=0 0\n"; + bool bundle = candidateVector_[0].isBundle; + + if (bundle) { + sdp << "a=group:BUNDLE audio video\n"; + } + printf("2\n"); + //candidates audio + bool printedAudio = false, printedVideo = false; + for (unsigned int it = 0; it < candidateVector_.size(); it++) { + const CandidateInfo& cand = candidateVector_[it]; + std::string hostType_str; + switch (cand.hostType) { + case HOST: + hostType_str = "host"; + break; + case SRLFX: + hostType_str = "srflx"; + break; + case PRFLX: + hostType_str = "prflx"; + break; + case RELAY: + hostType_str = "relay"; + break; + default: + hostType_str = "host"; + break; + } + if (cand.mediaType == AUDIO_TYPE) { + if (!printedAudio) { + sdp << "m=audio " << cand.hostPort + << " RTP/SAVPF 103 104 0 8 106 105 13 126\n" + << "c=IN IP4 " << cand.hostAddress + << endl << "a=rtcp:" << candidateVector_[0].hostPort + << " IN IP4 " << cand.hostAddress + << endl; + printedAudio = true; + } + + sdp << "a=candidate:" << cand.foundation << " " << cand.componentId + << " " << cand.netProtocol << " " << cand.priority << " " + << cand.hostAddress << " " << cand.hostPort << " typ " + << hostType_str << " generation 0" << endl; + + if (iceUsername_.empty() && bundle) { + iceUsername_ = cand.username; + icePassword_ = cand.password; + } + } + } + printf("3\n"); + //crypto audio + if (printedAudio) { + sdp << "a=ice-ufrag:" << iceUsername_ << endl; + sdp << "a=ice-pwd:" << icePassword_ << endl; + sdp << "a=sendrecv" << endl; + sdp << "a=mid:audio\na=rtcp-mux\n"; + for (unsigned int it = 0; it < cryptoVector_.size(); it++) { + const CryptoInfo& cryp_info = cryptoVector_[it]; + if (cryp_info.mediaType == AUDIO_TYPE) { + sdp << "a=crypto:" << cryp_info.tag << " " + << cryp_info.cipherSuite << " " << "inline:" + << cryp_info.keyParams << endl; + } + } + + sdp +// << "a=rtpmap:103 ISAC/16000\na=rtpmap:104 ISAC/32000\na=rtpmap:0 PCMU/8000\n" +// "a=rtpmap:8 PCMA/8000\na=rtpmap:106 CN/32000\na=rtpmap:105 CN/16000\n" +// "a=rtpmap:13 CN/8000\na=rtpmap:126 telephone-event/8000\n"; + << "a=rtpmap:0 PCMU/8000\n"; + sdp << "a=ssrc:" << audioSsrc << " cname:o/i14u9pJrxRKAsu\na=ssrc:" + << audioSsrc + << " mslabel:048f838f-2dd1-4a98-ab9e-8eb5f00abab8\na=ssrc:" + << audioSsrc << " label:iSight integrada\n"; + + } + + for (unsigned int it = 0; it < candidateVector_.size(); it++) { + const CandidateInfo& cand = candidateVector_[it]; + std::string hostType_str; + switch (cand.hostType) { + case HOST: + hostType_str = "host"; + break; + case SRLFX: + hostType_str = "srflx"; + break; + case PRFLX: + hostType_str = "prflx"; + break; + case RELAY: + hostType_str = "relay"; + break; + default: + hostType_str = "host"; + break; + } + if (cand.mediaType == VIDEO_TYPE) { + if (!printedVideo) { + sdp << "m=video " << cand.hostPort << " RTP/SAVPF 100 101 102\n" + << "c=IN IP4 " << cand.hostAddress + << endl << "a=rtcp:" << candidateVector_[0].hostPort + << " IN IP4 " << cand.hostAddress + << endl; + printedVideo = true; + } + + sdp << "a=candidate:" << cand.foundation << " " << cand.componentId + << " " << cand.netProtocol << " " << cand.priority << " " + << cand.hostAddress << " " << cand.hostPort << " typ " + << hostType_str << " generation 0" << endl; + + if (iceUsername_.empty() && bundle) { + iceUsername_ = cand.username; + icePassword_ = cand.password; + } + } + } + //crypto audio + if (printedVideo) { + sdp << "a=ice-ufrag:" << iceUsername_ << endl; + sdp << "a=ice-pwd:" << icePassword_ << endl; + sdp << "a=sendrecv" << endl; + 
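+        // Video counterpart of the audio block above (the "//crypto audio"
+        // comment a few lines up is a copy/paste leftover): a=mid:video and
+        // rtcp-mux, the a=crypto entry for VIDEO_TYPE, a single
+        // "a=rtpmap:100 VP8/90000" payload (red/ulpfec stay commented out) and
+        // the a=ssrc lines built from videoSsrc.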
sdp << "a=mid:video\na=rtcp-mux\n"; + for (unsigned int it = 0; it < cryptoVector_.size(); it++) { + const CryptoInfo& cryp_info = cryptoVector_[it]; + if (cryp_info.mediaType == VIDEO_TYPE) { + sdp << "a=crypto:" << cryp_info.tag << " " + << cryp_info.cipherSuite << " " << "inline:" + << cryp_info.keyParams << endl; + } + } + + sdp + << "a=rtpmap:100 VP8/90000\n"/*a=rtpmap:101 red/90000\na=rtpmap:102 ulpfec/90000\n"*/; + sdp << "a=ssrc:" << videoSsrc << " cname:o/i14u9pJrxRKAsu\na=ssrc:" + << videoSsrc + << " mslabel:048f838f-2dd1-4a98-ab9e-8eb5f00abab8\na=ssrc:" + << videoSsrc << " label:iSight integrada\n"; + } + printf("4\n"); + + return sdp.str(); +} + +bool SdpInfo::processSdp(const std::string& sdp) { + + std::string strLine; + std::istringstream iss(sdp); + char* line = (char*) malloc(1000); + char** pieces = (char**) malloc(10000); + char** cryptopiece = (char**) malloc(5000); + + const char *cand = "a=candidate:"; + const char *crypto = "a=crypto:"; + //const char *mid = "a=mid:"; + const char *group = "a=group:"; + const char *video = "m=video"; + const char *audio = "m=audio"; + const char *ice_user = "a=ice-ufrag"; + const char *ice_pass = "a=ice-pwd"; + const char *ssrctag = "a=ssrc"; + MediaType mtype = OTHER_MEDIA; + bool bundle = false; + + while (std::getline(iss, strLine)) { + const char* theline = strLine.c_str(); + sprintf(line, "%s\n", theline); + char* isVideo = strstr(line, video); + char* isAudio = strstr(line, audio); + char* isGroup = strstr(line, group); + char* isCand = strstr(line, cand); + char* isCrypt = strstr(line, crypto); + char* isUser = strstr(line, ice_user); + char* isPass = strstr(line, ice_pass); + char* isSsrc = strstr(line, ssrctag); + +// char* ismid = strstr(line,mid); + if (isGroup) { + bundle = true; + } + if (isVideo) { + mtype = VIDEO_TYPE; + } + if (isAudio) { + mtype = AUDIO_TYPE; + } + if (isCand != NULL) { + char *pch; + pch = strtok(line, " :"); + pieces[0] = pch; + int i = 0; + while (pch != NULL) { + pch = strtok(NULL, " :"); + pieces[i++] = pch; + } + + processCandidate(pieces, i - 1, mtype); + } +// if(ismid!=NULL){ +// printf(" %s\n", ismid+6); +// if (!strcmp(ismid+6,"video")){ +// +// }else if(!strcmp(ismid+6,"audio")){ +// +// } +// +// } + if (isCrypt != NULL) { + // printf("crypt %s\n", isCrypt ); + CryptoInfo crypinfo; + char *pch; + pch = strtok(line, " :"); + cryptopiece[0] = pch; + int i = 0; + while (pch != NULL) { + pch = strtok(NULL, " :"); +// printf("cryptopiece %i es %s\n", i, pch); + cryptopiece[i++] = pch; + } + + crypinfo.cipherSuite = std::string(cryptopiece[1]); + crypinfo.keyParams = std::string(cryptopiece[3]); + crypinfo.mediaType = mtype; + cryptoVector_.push_back(crypinfo); + // sprintf(key, "%s",cryptopiece[3]); + // keys = g_slist_append(keys,key); + } + if (isUser) { + char *pch; + pch = strtok(line, " : \n"); + pch = strtok(NULL, " : \n"); + iceUsername_ = std::string(pch); + + } + if (isPass) { + char *pch; + pch = strtok(line, " : \n"); + pch = strtok(NULL, ": \n"); + icePassword_ = std::string(pch); + } + if (isSsrc) { + char *pch; + pch = strtok(line, " : \n"); + pch = strtok(NULL, ": \n"); + if (mtype == VIDEO_TYPE) { + videoSsrc = strtoul(pch, NULL, 10); + } else if (mtype == AUDIO_TYPE) { + audioSsrc = strtoul(pch, NULL, 10); + } + } + + } + free(line); + free(pieces); + free(cryptopiece); + + for (unsigned int i = 0; i < candidateVector_.size(); i++) { + CandidateInfo& c = candidateVector_[i]; + c.username = iceUsername_; + c.password = icePassword_; + c.isBundle = bundle; + } + + return true; 
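+    // Note on the temporary buffers used above: "pieces" and "cryptopiece"
+    // only store pointers returned by strtok() into "line", so freeing the
+    // three malloc()ed blocks is sufficient. The fix-up loop just before this
+    // return copies the parsed ice-ufrag/ice-pwd values and the bundle flag
+    // into every candidate collected earlier. Each input line is also assumed
+    // to fit into the 1000-byte "line" buffer.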
+} + +std::vector& SdpInfo::getCandidateInfos() { + return candidateVector_; +} + +std::vector& SdpInfo::getCryptoInfos() { + return cryptoVector_; +} + +bool SdpInfo::processCandidate(char** pieces, int size, MediaType mediaType) { + + CandidateInfo cand; + const char* types_str[10] = { "host", "srflx", "prflx", "relay" }; + cand.mediaType = mediaType; + cand.foundation = pieces[0]; + cand.componentId = (unsigned int) strtoul(pieces[1], NULL, 10); + + cand.netProtocol = pieces[2]; + // libnice does not support tcp candidates, we ignore them + if (cand.netProtocol.compare("udp")) { + return false; + } +// a=candidate:0 1 udp 2130706432 138.4.4.143 52314 typ host generation 0 +// 0 1 2 3 4 5 6 7 8 9 + cand.priority = (unsigned int) strtoul(pieces[3], NULL, 10); + cand.hostAddress = std::string(pieces[4]); + cand.hostPort = (unsigned int) strtoul(pieces[5], NULL, 10); + if (strcmp(pieces[6], "typ")) { + return false; + } + unsigned int type = 1111; + int p; + for (p = 0; p < 4; p++) { + if (!strcmp(pieces[7], types_str[p])) { + type = p; + } + } + switch (type) { + case 0: + cand.hostType = HOST; + break; + case 1: + cand.hostType = SRLFX; + break; + case 2: + cand.hostType = PRFLX; + break; + case 3: + cand.hostType = RELAY; + break; + default: + cand.hostType = HOST; + break; + } + + if (type == 3) { + cand.relayAddress = std::string(pieces[8]); + cand.relayPort = (unsigned int) strtoul(pieces[9], NULL, 10); + } + candidateVector_.push_back(cand); + return true; +} + +}/* namespace erizo */ + diff --git a/erizo/src/erizo/SdpInfo.h b/erizo/src/erizo/SdpInfo.h new file mode 100644 index 0000000000..75b57faa64 --- /dev/null +++ b/erizo/src/erizo/SdpInfo.h @@ -0,0 +1,130 @@ +/* + * SDPProcessor.h + */ + +#ifndef SDPINFO_H_ +#define SDPINFO_H_ + +#include +#include + +namespace erizo { +/** + * ICE candidate types + */ +enum HostType { + HOST, SRLFX, PRFLX, RELAY +}; +/** + * Channel types + */ +enum MediaType { + VIDEO_TYPE, AUDIO_TYPE, BOTH_MEDIA, OTHER_MEDIA +}; +/** + * SRTP info. + */ +class CryptoInfo { +public: + CryptoInfo() : + tag(0) { + } + /** + * tag number + */ + int tag; + /** + * The cipher suite. Only AES_CM_128_HMAC_SHA1_80 is supported as of now. + */ + std::string cipherSuite; + /** + * The key + */ + std::string keyParams; + /** + * The MediaType + */ + MediaType mediaType; + +}; +/** + * Contains the information of an ICE Candidate + */ +class CandidateInfo { +public: + CandidateInfo() : + tag(0) { + } + bool isBundle; + int tag; + unsigned int priority; + unsigned int componentId; + std::string foundation; + std::string hostAddress; + std::string relayAddress; + int hostPort; + int relayPort; + std::string netProtocol; + HostType hostType; + std::string transProtocol; + std::string username; + std::string password; + MediaType mediaType; +}; +/** + * Contains the information of a single SDP. + * Used to parse and generate SDPs + */ +class SdpInfo { +public: + /** + * Constructor + */ + SdpInfo(); + virtual ~SdpInfo(); + /** + * Inits the object with a given SDP. + * @param sdp An string with the SDP. + * @return true if success + */ + bool initWithSdp(const std::string& sdp); + /** + * Adds a new candidate. + * @param info The CandidateInfo containing the new candidate + */ + void addCandidate(const CandidateInfo& info); + /** + * Adds SRTP info. + * @param info The CryptoInfo containing the information. + */ + void addCrypto(const CryptoInfo& info); + /** + * Gets the candidates. + * @return A vector containing the current candidates. 
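+	 * (returned by reference as a std::vector of CandidateInfo; the same
+	 * container that addCandidate() and SDP parsing fill)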
+ */ + std::vector& getCandidateInfos(); + /** + * Gets the SRTP information. + * @return A vector containing the CryptoInfo objects with the SRTP information. + */ + std::vector& getCryptoInfos(); + /** + * Gets the actual SDP. + * @return The SDP in string format. + */ + std::string getSdp(); + /** + * The audio and video SSRCs for this particular SDP. + */ + unsigned int audioSsrc, videoSsrc; + +private: + bool processSdp(const std::string& sdp); + bool processCandidate(char** pieces, int size, MediaType mediaType); + std::vector candidateVector_; + std::vector cryptoVector_; + std::string iceUsername_; + std::string icePassword_; +}; +}/* namespace erizo */ +#endif /* SDPPROCESSOR_H_ */ diff --git a/erizo/src/erizo/SrtpChannel.cpp b/erizo/src/erizo/SrtpChannel.cpp new file mode 100644 index 0000000000..40e795bef2 --- /dev/null +++ b/erizo/src/erizo/SrtpChannel.cpp @@ -0,0 +1,140 @@ +/* + * Srtpchannel.cpp + */ + +#include +#include + +#include "SrtpChannel.h" + +namespace erizo { + +SrtpChannel::SrtpChannel() { + + srtp_init(); + active_ = false; +} + +SrtpChannel::~SrtpChannel() { + + if (send_session_ != NULL) { + srtp_dealloc(send_session_); + } + if (receive_session_ != NULL) { + srtp_dealloc(receive_session_); + } + +} + +bool SrtpChannel::setRtpParams(char* sendingKey, char* receivingKey) { + printf("Configuring srtp local key %s remote key %s\n", sendingKey, + receivingKey); + configureSrtpSession(&send_session_, sendingKey, SENDING); + configureSrtpSession(&receive_session_, receivingKey, RECEIVING); + + active_ = true; + return active_; +} + +bool SrtpChannel::setRtcpParams(char* sendingKey, char* receivingKey) { + + return 0; +} + +int SrtpChannel::protectRtp(char* buffer, int *len) { + + if (!active_) + return 0; + int val = srtp_protect(send_session_, buffer, len); + if (val == 0) { + return 0; + } else { + printf("Error SRTP %u\n", val); + return -1; + } +} + +int SrtpChannel::unprotectRtp(char* buffer, int *len) { + + if (!active_) + return 0; + rtcpheader *chead = (rtcpheader*) buffer; + + if (chead->packettype == 200 || chead->packettype == 201) { +// printf("RTCP\n"); + *len = -1; + return -1; + } + // printf("Es RTP\n"); + int val = srtp_unprotect(receive_session_, (char*) buffer, len); + if (val == 0) { + return 0; + } else { + printf("Error SRTP %u\n", val); + return -1; + } +} + +int SrtpChannel::protectRtcp(char* buffer, int *len) { + + int val = srtp_protect_rtcp(send_session_, (char*) buffer, len); + if (val == 0) { + return 0; + } else { + printf("Error SRTP %u\n", val); + return -1; + } +} + +int SrtpChannel::unprotectRtcp(char* buffer, int *len) { + + int val = srtp_unprotect_rtcp(receive_session_, buffer, len); + if (val != err_status_ok) { + return 0; + } else { + printf("Error SRTP %u\n", val); + return -1; + } +} + +std::string SrtpChannel::generateBase64Key() { + + unsigned char key[30]; + crypto_get_random(key, 30); + gchar* base64key = g_base64_encode((guchar*) key, 30); + return std::string(base64key); +} + +bool SrtpChannel::configureSrtpSession(srtp_t *session, const char* key, + enum TransmissionType type) { + + srtp_policy_t policy; + memset(&policy, 0, sizeof(policy)); + crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp); + crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp); + if (type == SENDING) { + policy.ssrc.type = ssrc_any_outbound; + } else { + + policy.ssrc.type = ssrc_any_inbound; + } + + policy.ssrc.value = 0; + policy.window_size = 1024; + policy.allow_repeat_tx = 1; + policy.next = NULL; + //printf("auth_tag_len %d\n", 
policy.rtp.auth_tag_len); + + gsize len = 0; + uint8_t *akey = (uint8_t*) g_base64_decode((gchar*) key, &len); + printf("set master key/salt to %s/", octet_string_hex_string(akey, 16)); + printf("%s\n", octet_string_hex_string(akey + 16, 14)); + // allocate and initialize the SRTP session + policy.key = akey; + srtp_create(session, &policy); +// return res!=0? false:true; + return true; +} + +} /*namespace erizo */ + diff --git a/erizo/src/erizo/SrtpChannel.h b/erizo/src/erizo/SrtpChannel.h new file mode 100644 index 0000000000..65b3bf37fb --- /dev/null +++ b/erizo/src/erizo/SrtpChannel.h @@ -0,0 +1,110 @@ +/* + * Srtpchannel.h + */ + +#ifndef SRTPCHANNEL_H_ +#define SRTPCHANNEL_H_ + +#include +#include +#include + +namespace erizo { + +typedef struct { + uint32_t cc :4; + uint32_t extension :1; + uint32_t padding :1; + uint32_t version :2; + uint32_t payloadtype :7; + uint32_t marker :1; + uint32_t seqnum :16; + uint32_t timestamp; + uint32_t ssrc; +} rtpheader; + +typedef struct { + uint32_t blockcount :5; + uint32_t padding :1; + uint32_t version :2; + uint32_t packettype :8; + uint32_t length :16; + uint32_t ssrc; +} rtcpheader; +/** + * A SRTP data Channel. + * Represents a SRTP Channel with keys for protecting and unprotecting RTP and RTCP data. + */ +class SrtpChannel { + +public: + /** + * The constructor. At this point the class is only initialized but it still needs the Key pair. + */ + SrtpChannel(); + virtual ~SrtpChannel(); + /** + * Protects RTP Data + * @param buffer Pointer to the buffer with the data. The protected data is returned here + * @param len Pointer to the length of the data. The length is returned here + * @return 0 or an error code + */ + int protectRtp(char* buffer, int *len); + /** + * Unprotects RTP Data + * @param buffer Pointer to the buffer with the data. The unprotected data is returned here + * @param len Pointer to the length of the data. The length is returned here + * @return 0 or an error code + */ + int unprotectRtp(char* buffer, int *len); + /** + * Protects RTCP Data + * @param buffer Pointer to the buffer with the data. The protected data is returned here + * @param len Pointer to the length of the data. The length is returned here + * @return 0 or an error code + */ + int protectRtcp(char* buffer, int *len); + /** + * Unprotects RTCP Data + * @param buffer Pointer to the buffer with the data. The unprotected data is returned here + * @param len Pointer to the length of the data. 
The length is returned here + * @return 0 or an error code + */ + int unprotectRtcp(char* buffer, int *len); + /** + * Sets a key pair for the RTP channel + * @param sendingKey The key for protecting data + * @param receivingKey The key for unprotecting data + * @return true if everything is ok + */ + bool setRtpParams(char* sendingKey, char* receivingKey); + /** + * Sets a key pair for the RTCP channel + * @param sendingKey The key for protecting data + * @param receivingKey The key for unprotecting data + * @return true if everything is ok + */ + bool setRtcpParams(char* sendingKey, char* receivingKey); + /** + * Generates a valid key and encodes it in Base64 + * @return The new key + */ + static std::string generateBase64Key(); + +private: + enum TransmissionType { + SENDING, RECEIVING + }; + + bool configureSrtpSession(srtp_t *session, const char* key, + enum TransmissionType type); + + bool active_; + srtp_t send_session_; + srtp_t receive_session_; + srtp_t rtcp_send_session_; + srtp_t rtcp_receive_session_; +}; + +} /* namespace erizo */ +#endif /* SRTPCHANNEL_H_ */ diff --git a/erizo/src/erizo/WebRtcConnection.cpp b/erizo/src/erizo/WebRtcConnection.cpp new file mode 100644 index 0000000000..dceb7f32f4 --- /dev/null +++ b/erizo/src/erizo/WebRtcConnection.cpp @@ -0,0 +1,418 @@ +/* + * WebRTCConnection.cpp + */ + +#include + +#include "WebRtcConnection.h" +#include "NiceConnection.h" + +#include "SdpInfo.h" + +namespace erizo { + +WebRtcConnection::WebRtcConnection() { + + video_ = 1; + audio_ = 1; + sequenceNumberFIR_ = 0; + bundle_ = true; + localVideoSsrc_ = 55543; + localAudioSsrc_ = 44444; + videoReceiver_ = NULL; + audioReceiver_ = NULL; + audioNice_ = NULL; + videoNice_ = NULL; + audioSrtp_ = NULL; + videoSrtp_ = NULL; + globalIceState_ = INITIAL; + connStateListener_ = NULL; + + sending = true; + send_Thread_ = boost::thread(&WebRtcConnection::sendLoop, this); + + if (!bundle_) { + if (video_) { + videoNice_ = new NiceConnection(VIDEO_TYPE, ""); + videoNice_->setWebRtcConnection(this); + videoSrtp_ = new SrtpChannel(); + CryptoInfo crytp; + crytp.cipherSuite = std::string("AES_CM_128_HMAC_SHA1_80"); + crytp.mediaType = VIDEO_TYPE; + std::string key = SrtpChannel::generateBase64Key(); + + crytp.keyParams = key; + crytp.tag = 0; + localSdp_.addCrypto(crytp); + localSdp_.videoSsrc = localVideoSsrc_; + } + + if (audio_) { + audioNice_ = new NiceConnection(AUDIO_TYPE, ""); + audioNice_->setWebRtcConnection(this); + audioSrtp_ = new SrtpChannel(); + CryptoInfo crytp; + crytp.cipherSuite = std::string("AES_CM_128_HMAC_SHA1_80"); + crytp.mediaType = AUDIO_TYPE; + crytp.tag = 1; + std::string key = SrtpChannel::generateBase64Key(); + crytp.keyParams = key; + localSdp_.addCrypto(crytp); + localSdp_.audioSsrc = localAudioSsrc_; + } + + } else { + videoNice_ = new NiceConnection(VIDEO_TYPE, ""); + videoNice_->setWebRtcConnection(this); + videoSrtp_ = new SrtpChannel(); + CryptoInfo crytpv; + crytpv.cipherSuite = std::string("AES_CM_128_HMAC_SHA1_80"); + crytpv.mediaType = VIDEO_TYPE; + std::string keyv = SrtpChannel::generateBase64Key(); + crytpv.keyParams = keyv; + crytpv.tag = 1; + localSdp_.addCrypto(crytpv); + localSdp_.videoSsrc = localVideoSsrc_; + // audioSrtp_ = new SrtpChannel(); + CryptoInfo crytpa; + crytpa.cipherSuite = std::string("AES_CM_128_HMAC_SHA1_80"); + crytpa.mediaType = AUDIO_TYPE; + //crytpa.tag = 1; + crytpa.tag = 1; + // std::string keya = SrtpChannel::generateBase64Key(); + crytpa.keyParams = keyv; + localSdp_.addCrypto(crytpa); + localSdp_.audioSsrc = 
localAudioSsrc_; + + } + + printf("WebRTCConnection constructed with video %d audio %d\n", video_, + audio_); +} + +WebRtcConnection::~WebRtcConnection() { + + this->close(); +} + +bool WebRtcConnection::init() { + + if (!bundle_) { + if (video_) + videoNice_->start(); + if (audio_) + audioNice_->start(); + } else { + videoNice_->start(); + } + return true; +} + +void WebRtcConnection::close() { + if (sending != false) { + sending = false; + send_Thread_.join(); + } + if (audio_) { + if (audioNice_ != NULL) { + audioNice_->close(); + audioNice_->join(); + delete audioNice_; + } + if (audioSrtp_ != NULL) + delete audioSrtp_; + } + if (video_) { + if (videoNice_ != NULL) { + videoNice_->close(); + videoNice_->join(); + delete videoNice_; + } + if (videoSrtp_ != NULL) + delete videoSrtp_; + } +} + +bool WebRtcConnection::setRemoteSdp(const std::string &sdp) { + + remoteSdp_.initWithSdp(sdp); + std::vector crypto_remote = remoteSdp_.getCryptoInfos(); + std::vector crypto_local = localSdp_.getCryptoInfos(); + video_ = false; + audio_ = false; + + CryptoInfo cryptLocal_video; + CryptoInfo cryptLocal_audio; + CryptoInfo cryptRemote_video; + CryptoInfo cryptRemote_audio; + + for (unsigned int it = 0; it < crypto_remote.size(); it++) { + CryptoInfo cryptemp = crypto_remote[it]; + if (cryptemp.mediaType == VIDEO_TYPE + && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) { + video_ = true; + cryptRemote_video = cryptemp; + } else if (cryptemp.mediaType == AUDIO_TYPE + && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) { + audio_ = true; + cryptRemote_audio = cryptemp; + } + } + for (unsigned int it = 0; it < crypto_local.size(); it++) { + CryptoInfo cryptemp = crypto_local[it]; + if (cryptemp.mediaType == VIDEO_TYPE + && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) { + cryptLocal_video = cryptemp; + } else if (cryptemp.mediaType == AUDIO_TYPE + && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) { + cryptLocal_audio = cryptemp; + } + } + if (!bundle_) { + if (video_) { + videoNice_->setRemoteCandidates(remoteSdp_.getCandidateInfos()); + videoSrtp_->setRtpParams((char*) cryptLocal_video.keyParams.c_str(), + (char*) cryptRemote_video.keyParams.c_str()); + + } + if (audio_) { + audioNice_->setRemoteCandidates(remoteSdp_.getCandidateInfos()); + audioSrtp_->setRtpParams((char*) cryptLocal_audio.keyParams.c_str(), + (char*) cryptRemote_audio.keyParams.c_str()); + } + } else { + videoNice_->setRemoteCandidates(remoteSdp_.getCandidateInfos()); + remoteVideoSSRC_ = remoteSdp_.videoSsrc; + remoteAudioSSRC_ = remoteSdp_.audioSsrc; + videoSrtp_->setRtpParams((char*) cryptLocal_video.keyParams.c_str(), + (char*) cryptRemote_video.keyParams.c_str()); + videoSrtp_->setRtcpParams((char*) cryptLocal_video.keyParams.c_str(), + (char*) cryptRemote_video.keyParams.c_str()); + // audioSrtp_->setRtpParams((char*)cryptLocal_audio.keyParams.c_str(), (char*)cryptRemote_audio.keyParams.c_str()); + + } + return true; +} + +std::string WebRtcConnection::getLocalSdp() { + std::vector *cands; + printf("Geting Local sdp\n"); + if (bundle_) { + if (videoNice_->iceState > CANDIDATES_GATHERED) { + cands = videoNice_->localCandidates; + for (unsigned int it = 0; it < cands->size(); it++) { + CandidateInfo cand = cands->at(it); + cand.isBundle = bundle_; + localSdp_.addCandidate(cand); + cand.mediaType = AUDIO_TYPE; + localSdp_.addCandidate(cand); + } + } else { + printf("WARNING getting local sdp before it is ready!\n"); + } + } else { + if (video_ && videoNice_->iceState > CANDIDATES_GATHERED) 
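+ // Local candidates are only copied into the SDP once the ICE state has advanced past
+ // CANDIDATES_GATHERED; asking for the SDP earlier yields a description without
+ // candidates (the bundle branch above logs a warning in that case).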
{ + cands = videoNice_->localCandidates; + for (unsigned int it = 0; it < cands->size(); it++) { + CandidateInfo cand = cands->at(it); + localSdp_.addCandidate(cand); + } + } + if (audio_ && audioNice_->iceState > CANDIDATES_GATHERED) { + cands = audioNice_->localCandidates; + for (unsigned int it = 0; it < cands->size(); it++) { + CandidateInfo cand = cands->at(it); + localSdp_.addCandidate(cand); + } + } + } + return localSdp_.getSdp(); +} + +void WebRtcConnection::setAudioReceiver(MediaReceiver *receiv) { + + this->audioReceiver_ = receiv; +} + +void WebRtcConnection::setVideoReceiver(MediaReceiver *receiv) { + + this->videoReceiver_ = receiv; +} + +int WebRtcConnection::receiveAudioData(char* buf, int len) { + boost::mutex::scoped_lock lock(receiveAudioMutex_); + int res = -1; + int length = len; + if (audioSrtp_) { + audioSrtp_->protectRtp(buf, &length); + // printf("A mandar %d\n", length); + } + if (len <= 0) + return length; + if (audioNice_) { + res = audioNice_->sendData(buf, length); + } + return res; +} + +int WebRtcConnection::receiveVideoData(char* buf, int len) { + + int res = -1; + int length = len; + if (videoSrtp_ && videoNice_->iceState == READY) { + videoSrtp_->protectRtp(buf, &length); + } + if (length <= 10) + return length; + if (videoNice_->iceState == READY) { + receiveVideoMutex_.lock(); + if (sendQueue_.size() < 1000) { + packet p_; + memset(p_.data, 0, length); + memcpy(p_.data, buf, length); + + p_.length = length; + sendQueue_.push(p_); + } + receiveVideoMutex_.unlock(); + } + return res; +} + +int WebRtcConnection::receiveNiceData(char* buf, int len, + NiceConnection* nice) { + // printf("Receive Nice Data %d, type %d\n", len, nice->mediaType); + boost::mutex::scoped_lock lock(writeMutex_); + if (audioReceiver_ == NULL && videoReceiver_ == NULL) + return 0; + + int length = len; + if (bundle_) { + if (videoSrtp_) { + videoSrtp_->unprotectRtp(buf, &length); + } + if (length <= 0) + return length; + rtpheader* inHead = (rtpheader*) buf; + if (inHead->ssrc == htonl(remoteVideoSSRC_)) { + inHead->ssrc = htonl(localVideoSsrc_); + videoReceiver_->receiveVideoData(buf, length); + + } else if (inHead->ssrc == htonl(remoteAudioSSRC_)) { + inHead->ssrc = htonl(localAudioSsrc_); + videoReceiver_->receiveVideoData(buf, length); // We send it via the video nice, the only one we have + } else { + printf("Unknown SSRC, ignoring\n"); + } + return length; + + } + + if (nice->mediaType == AUDIO_TYPE) { + if (audioReceiver_ != NULL) { + if (audioSrtp_) { + audioSrtp_->unprotectRtp(buf, &length); + } + if (length <= 0) + return length; + rtpheader *head = (rtpheader*) buf; + head->ssrc = htonl(localAudioSsrc_); + audioReceiver_->receiveAudioData(buf, length); + return length; + } + } else if (nice->mediaType == VIDEO_TYPE) { + if (videoReceiver_ != NULL) { + if (videoSrtp_) { + videoSrtp_->unprotectRtp(buf, &length); + } + if (length <= 0) + return length; + rtpheader *head = (rtpheader*) buf; + head->ssrc = htonl(localVideoSsrc_); + videoReceiver_->receiveVideoData(buf, length); + return length; + } + } + return -1; +} + +int WebRtcConnection::sendFirPacket() { + sequenceNumberFIR_++; // do not increase if repetition + int pos = 0; + uint8_t rtcpPacket[50]; + // add full intra request indicator + uint8_t FMT = 4; + rtcpPacket[pos++] = (uint8_t) 0x80 + FMT; + rtcpPacket[pos++] = (uint8_t) 206; + + //Length of 4 + rtcpPacket[pos++] = (uint8_t) 0; + rtcpPacket[pos++] = (uint8_t) (4); + + // Add our own SSRC + uint32_t* ptr = reinterpret_cast(rtcpPacket + pos); + ptr[0] = 
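+ // The FIR packet assembled here follows RFC 5104 (Full Intra Request): the PSFB common
+ // header written above (V=2, FMT=4, PT=206, length field 4) is followed by the sender
+ // SSRC, a zeroed media source SSRC, and a single FCI entry carrying the target SSRC,
+ // the FIR command sequence number and three reserved bytes.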
htonl(localVideoSsrc_); + pos += 4; + + rtcpPacket[pos++] = (uint8_t) 0; + rtcpPacket[pos++] = (uint8_t) 0; + rtcpPacket[pos++] = (uint8_t) 0; + rtcpPacket[pos++] = (uint8_t) 0; + // Additional Feedback Control Information (FCI) + uint32_t* ptr2 = reinterpret_cast(rtcpPacket + pos); + ptr2[0] = htonl(remoteVideoSSRC_); + pos += 4; + + rtcpPacket[pos++] = (uint8_t) (sequenceNumberFIR_); + rtcpPacket[pos++] = (uint8_t) 0; + rtcpPacket[pos++] = (uint8_t) 0; + rtcpPacket[pos++] = (uint8_t) 0; + if (videoSrtp_ != NULL && videoNice_ != NULL + && videoNice_->iceState == READY) { + videoSrtp_->protectRtcp((char*) rtcpPacket, &pos); + videoNice_->sendData((char*) rtcpPacket, pos); + } + return pos; +} + +void WebRtcConnection::setWebRTCConnectionStateListener( + WebRtcConnectionStateListener* listener) { + this->connStateListener_ = listener; +} + +void WebRtcConnection::updateState(IceState newState, + NiceConnection* niceConn) { + + if (bundle_) { + if (newState == globalIceState_) + return; + globalIceState_ = newState; + if (connStateListener_ != NULL) + connStateListener_->connectionStateChanged(globalIceState_); +// if (newState == FAILED) +// this->close(); + } + +} + +IceState WebRtcConnection::getCurrentState() { + return globalIceState_; +} + +void WebRtcConnection::sendLoop() { + + while (sending == true) { + receiveVideoMutex_.lock(); + if (sendQueue_.size() > 0) { + videoNice_->sendData(sendQueue_.front().data, + sendQueue_.front().length); + sendQueue_.pop(); + receiveVideoMutex_.unlock(); + } else { + receiveVideoMutex_.unlock(); + usleep(1000); + } + } +} + +} /* namespace erizo */ diff --git a/erizo/src/erizo/WebRtcConnection.h b/erizo/src/erizo/WebRtcConnection.h new file mode 100644 index 0000000000..213aad02cf --- /dev/null +++ b/erizo/src/erizo/WebRtcConnection.h @@ -0,0 +1,135 @@ +#ifndef WEBRTCCONNECTION_H_ +#define WEBRTCCONNECTION_H_ + +#include +#include +#include +#include + +#include "SrtpChannel.h" +#include "SdpInfo.h" +#include "MediaDefinitions.h" + +namespace erizo { + +class NiceConnection; +/** + * States of ICE + */ +enum IceState { + INITIAL, CANDIDATES_GATHERED, CANDIDATES_RECEIVED, READY, FINISHED, FAILED +}; + +class WebRtcConnectionStateListener { +public: + virtual ~WebRtcConnectionStateListener() { + } + ; + virtual void connectionStateChanged(IceState newState)=0; + +}; + +/** + * A WebRTC Connection. This class represents a WebRTC Connection that can be established with other peers via a SDP negotiation + * it comprises all the necessary ICE and SRTP components. + */ +class WebRtcConnection: public MediaReceiver, public NiceReceiver { +public: + + /** + * Constructor. + * Constructs an empty WebRTCConnection without any configuration. + */ + WebRtcConnection(); + /** + * Destructor. + */ + virtual ~WebRtcConnection(); + /** + * Inits the WebConnection by starting ICE Candidate Gathering. + * @return True if the candidates are gathered. + */ + bool init(); + /** + * Closes the webRTC connection. + * The object cannot be used after this call. + */ + void close(); + /** + * Sets the SDP of the remote peer. + * @param sdp The SDP. + * @return true if the SDP was received correctly. + */ + bool setRemoteSdp(const std::string &sdp); + /** + * Obtains the local SDP. + * @return The SDP as a string. + */ + std::string getLocalSdp(); + + int receiveAudioData(char* buf, int len); + int receiveVideoData(char* buf, int len); + /** + * Sets a MediaReceiver that is going to receive Audio Data + * @param receiv The MediaReceiver to send audio to. 
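+ *
+ * Typical wiring (sketch, names are illustrative): a publishing connection forwards its
+ * incoming media to a OneToManyTranscoder or any other MediaReceiver, e.g.
+ * publisher->setAudioReceiver(transcoder); publisher->setVideoReceiver(transcoder);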
+ */ + void setAudioReceiver(MediaReceiver *receiv); + /** + * Sets a MediaReceiver that is going to receive Video Data + * @param receiv The MediaReceiver + */ + void setVideoReceiver(MediaReceiver *receiv); + /** + * Method to Receive data from a NiceConnection + * @param buf The data buffer + * @param len The length of the buffer + * @param nice The NiceConnection orgi + * @return + */ + + int receiveNiceData(char* buf, int len, NiceConnection *nice); + + /** + * Sends a FIR Packet (RFC 5104) asking for a keyframe + * @return the size of the data sent + */ + int sendFirPacket(); + + void setWebRTCConnectionStateListener( + WebRtcConnectionStateListener* listener); + /** + * Gets the current state of the Ice Connection + * @return + */ + IceState getCurrentState(); + +private: + SdpInfo remoteSdp_; + SdpInfo localSdp_; + NiceConnection* audioNice_; + NiceConnection* videoNice_; + SrtpChannel* audioSrtp_; + SrtpChannel* videoSrtp_; + IceState globalIceState_; + + MediaReceiver* audioReceiver_; + MediaReceiver* videoReceiver_; + int video_, audio_, bundle_, sequenceNumberFIR_; + unsigned int localAudioSsrc_, localVideoSsrc_; + unsigned int remoteAudioSSRC_, remoteVideoSSRC_; + boost::mutex writeMutex_, receiveAudioMutex_, receiveVideoMutex_; + boost::thread send_Thread_; + std::queue sendQueue_; + WebRtcConnectionStateListener* connStateListener_; + + void updateState(IceState newState, NiceConnection* niceConn); + + bool sending; + void sendLoop(); + + friend class NiceConnection; + +}; + +} /* namespace erizo */ +#endif /* WEBRTCCONNECTION_H_ */ diff --git a/erizo/src/erizo/media/MediaProcessor.cpp b/erizo/src/erizo/media/MediaProcessor.cpp new file mode 100644 index 0000000000..b07e2ad4e7 --- /dev/null +++ b/erizo/src/erizo/media/MediaProcessor.cpp @@ -0,0 +1,540 @@ +#include + +#include "MediaProcessor.h" +#include "rtp/RtpVP8Fragmenter.h" +#include "rtp/RtpHeader.h" +#include "codecs/VideoCodec.h" + +namespace erizo { + + InputProcessor::InputProcessor() { + + audioDecoder = 0; + videoDecoder = 0; + + audioUnpackager = 0; + videoUnpackager = 0; + gotUnpackagedFrame_ = false; + upackagedSize_ = 0; + decodedBuffer_ = NULL; + + av_register_all(); + } + + InputProcessor::~InputProcessor() { + if (audioDecoder == 1) { + avcodec_close(aDecoderContext); + av_free(aDecoderContext); + } + + if (videoDecoder == 1) { + vDecoder.closeDecoder(); + } + if (decodedBuffer_ != NULL) { + free(decodedBuffer_); + } + } + + int InputProcessor::init(const MediaInfo& info, RawDataReceiver* receiver) { + this->mediaInfo = info; + this->rawReceiver_ = receiver; + if (mediaInfo.hasVideo) { + mediaInfo.videoCodec.codec = VIDEO_CODEC_VP8; + decodedBuffer_ = (unsigned char*) malloc( + info.videoCodec.width * info.videoCodec.height * 3 / 2); + unpackagedBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE); + if(!vDecoder.initDecoder(mediaInfo.videoCodec)); + videoDecoder = 1; + if(!this->initVideoUnpackager()); + } + if (mediaInfo.hasAudio) { + printf("Init AUDIO processor\n"); + mediaInfo.audioCodec.codec = AUDIO_CODEC_PCM_MULAW_8; + decodedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE); + unpackagedAudioBuffer_ = (unsigned char*) malloc( + UNPACKAGED_BUFFER_SIZE); + this->initAudioDecoder(); + this->initAudioUnpackager(); + } + return 0; + } + + int InputProcessor::receiveAudioData(char* buf, int len) { + if (audioDecoder && audioUnpackager) { + printf("Decoding audio\n"); + int unp = unpackageAudio((unsigned char*) buf, len, + unpackagedAudioBuffer_); + int a = 
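+ // unpackageAudio() has already stripped the 12-byte RTP header; the remaining payload
+ // is decoded below and the resulting raw samples are handed to the registered
+ // RawDataReceiver.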
decodeAudio(unpackagedAudioBuffer_, unp, decodedAudioBuffer_);
+ printf("DECODED AUDIO a %d\n", a);
+ RawDataPacket p;
+ p.data = decodedAudioBuffer_;
+ p.type = AUDIO;
+ p.length = a;
+ rawReceiver_->receiveRawData(p);
+ }
+ return 0;
+ }
+ int InputProcessor::receiveVideoData(char* buf, int len) {
+ if (videoUnpackager && videoDecoder) {
+ int ret = unpackageVideo(reinterpret_cast<unsigned char*>(buf), len,
+ unpackagedBuffer_, &gotUnpackagedFrame_);
+ if (ret < 0)
+ return 0;
+ upackagedSize_ += ret;
+ unpackagedBuffer_ += ret;
+ if (gotUnpackagedFrame_) {
+ unpackagedBuffer_ -= upackagedSize_;
+ printf("Got an unpackaged frame, size = %d\n",
+ upackagedSize_);
+ int c;
+ int gotDecodedFrame = 0;
+
+ c = vDecoder.decodeVideo(unpackagedBuffer_, upackagedSize_,
+ decodedBuffer_,
+ mediaInfo.videoCodec.width * mediaInfo.videoCodec.height * 3
+ / 2, &gotDecodedFrame);
+
+ upackagedSize_ = 0;
+ gotUnpackagedFrame_ = 0;
+ printf("Bytes dec = %d\n", c);
+ if (gotDecodedFrame && c > 0) {
+ printf("Got a decoded frame\n");
+ gotDecodedFrame = 0;
+ RawDataPacket p;
+ p.data = decodedBuffer_;
+ p.length = c;
+ p.type = VIDEO;
+ rawReceiver_->receiveRawData(p);
+ }
+ }
+ return 1;
+ }
+ return 1;
+ }
+
+ bool InputProcessor::initAudioDecoder() {
+
+ aDecoder = avcodec_find_decoder(static_cast<CodecID>(mediaInfo.audioCodec.codec));
+ if (!aDecoder) {
+ printf("Audio decoder not found");
+ return false;
+ }
+
+ aDecoderContext = avcodec_alloc_context3(aDecoder);
+ if (!aDecoderContext) {
+ printf("Memory error allocating audio decoder context");
+ return false;
+ }
+
+ aDecoderContext->sample_fmt = AV_SAMPLE_FMT_S16;
+ aDecoderContext->bit_rate = mediaInfo.audioCodec.bitRate;
+ aDecoderContext->sample_rate = mediaInfo.audioCodec.sampleRate;
+ aDecoderContext->channels = 1;
+
+ if (avcodec_open2(aDecoderContext, aDecoder, NULL) < 0) {
+ printf("Error opening audio decoder\n");
+ exit(0);
+ return false;
+ }
+ printf("Audio decoder frame_size %d\n", aDecoderContext->frame_size);
+ audioDecoder = 1;
+ return true;
+
+ }
+
+ bool InputProcessor::initAudioUnpackager() {
+ audioUnpackager = 1;
+ return true;
+ }
+
+ bool InputProcessor::initVideoUnpackager() {
+ videoUnpackager = 1;
+ return true;
+
+ }
+
+ int InputProcessor::decodeAudio(unsigned char* inBuff, int inBuffLen,
+ unsigned char* outBuff) {
+
+ if (audioDecoder == 0) {
+ printf("audioDecoder parameters have not been initialized\n");
+ return -1;
+ }
+
+ AVPacket avpkt;
+ int outSize;
+ int decSize = 0;
+ int len = -1;
+ uint8_t *decBuff = (uint8_t*) malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
+
+ av_init_packet(&avpkt);
+ avpkt.data = (unsigned char*) inBuff;
+ avpkt.size = inBuffLen;
+
+ while (avpkt.size > 0) {
+
+ outSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
+
+ //This may fail. Adapted from libavcodec/utils.c, following the change from avcodec_decode_audio3 to avcodec_decode_audio4
+ //avcodec_decode_audio3(aDecoderContext, (short*)decBuff, &outSize, &avpkt);
+
+ AVFrame frame;
+ int got_frame = 0;
+
+ aDecoderContext->get_buffer = avcodec_default_get_buffer;
+ aDecoderContext->release_buffer = avcodec_default_release_buffer;
+
+ len = avcodec_decode_audio4(aDecoderContext, &frame, &got_frame,
+ &avpkt);
+ if (len >= 0 && got_frame) {
+ int plane_size;
+ //int planar = av_sample_fmt_is_planar(aDecoderContext->sample_fmt);
+ int data_size = av_samples_get_buffer_size(&plane_size,
+ aDecoderContext->channels, frame.nb_samples,
+ aDecoderContext->sample_fmt, 1);
+ if (outSize < data_size) {
+ printf(
+ "output buffer size is too small for the current frame\n");
+ return AVERROR(EINVAL);
+ }
+
+ memcpy(decBuff, frame.extended_data[0], plane_size);
+
+ /* If there is more than one channel
+ if (planar && aDecoderContext->channels > 1) {
+ uint8_t *out = ((uint8_t *)decBuff) + plane_size;
+ for (int ch = 1; ch < aDecoderContext->channels; ch++) {
+ memcpy(out, frame.extended_data[ch], plane_size);
+ out += plane_size;
+ }
+ }
+ */
+ outSize = data_size;
+ } else {
+ outSize = 0;
+ }
+
+ if (len < 0) {
+ printf("Error decoding audio\n");
+ free(decBuff);
+ return -1;
+ }
+
+ avpkt.size -= len;
+ avpkt.data += len;
+
+ if (outSize <= 0) {
+ continue;
+ }
+
+ memcpy(outBuff, decBuff, outSize);
+ outBuff += outSize;
+ decSize += outSize;
+ }
+
+ free(decBuff);
+
+ if (outSize <= 0) {
+ printf("Audio decoding error due to incorrect size");
+ return -1;
+ }
+
+ return decSize;
+
+ }
+
+ int InputProcessor::unpackageAudio(unsigned char* inBuff, int inBuffLen,
+ unsigned char* outBuff) {
+
+ int l = inBuffLen - RTPHeader::MIN_SIZE;
+ memcpy(outBuff, &inBuff[RTPHeader::MIN_SIZE], l);
+
+ return l;
+ }
+
+ int InputProcessor::unpackageVideo(unsigned char* inBuff, int inBuffLen,
+ unsigned char* outBuff, int *gotFrame) {
+
+ if (videoUnpackager == 0) {
+ printf("Unpackager not correctly initialized");
+ return -1;
+ }
+
+ int inBuffOffset = 0;
+ *gotFrame = 0;
+ RTPHeader* head = reinterpret_cast<RTPHeader*>(inBuff);
+
+
+// printf("PT %d, ssrc %u, extension %d\n", head->getPayloadType(), head->getSSRC(),
+// head->getExtension());
+ if ( head->getSSRC() != 55543 /*&& head->payloadtype!=101*/) {
+ return -1;
+ }
+ if (head->getPayloadType() != 100) {
+ return -1;
+ }
+
+// printf("RTP header length: %d", head->getHeaderLength()); //Should include extensions
+ int l = inBuffLen - head->getHeaderLength();
+ inBuffOffset+=head->getHeaderLength();
+
+ erizo::RTPPayloadVP8* parsed = pars.parseVP8(
+ (unsigned char*) &inBuff[inBuffOffset], l);
+ memcpy(outBuff, parsed->data, parsed->dataLength);
+ if (head->getMarker()) {
+ printf("Marker\n");
+ *gotFrame = 1;
+ }
+ return parsed->dataLength;
+
+ }
+
+ OutputProcessor::OutputProcessor() {
+
+ audioCoder = 0;
+ videoCoder = 0;
+
+ audioPackager = 0;
+ videoPackager = 0;
+ timestamp_ = 0;
+
+ encodedBuffer_ = NULL;
+ packagedBuffer_ = NULL;
+
+ avcodec_register_all();
+ av_register_all();
+ }
+
+ OutputProcessor::~OutputProcessor() {
+
+ if (audioCoder == 1) {
+ avcodec_close(aCoderContext);
+ av_free(aCoderContext);
+ }
+
+ if (videoCoder == 1) {
+ vCoder.closeEncoder();
+ }
+ if (encodedBuffer_) {
+ free(encodedBuffer_);
+ }
+ if (packagedBuffer_) {
+ free(packagedBuffer_);
+ }
+ if (rtpBuffer_) {
+ free(rtpBuffer_);
+ }
+ }
+
+ int OutputProcessor::init(const MediaInfo& info, RTPDataReceiver* rtpReceiver) {
+ this->mediaInfo = info;
+ this->rtpReceiver_ 
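+ // OutputProcessor pipeline: receiveRawData() takes raw frames, encodeVideo()/encodeAudio()
+ // compress them, packageVideo()/packageAudio() wrap the result in RTP headers, and the
+ // packets are handed to the RTPDataReceiver registered here.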
= rtpReceiver;
+
+ encodedBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
+ packagedBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
+ rtpBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
+
+ if (mediaInfo.hasVideo) {
+ this->mediaInfo.videoCodec.codec = VIDEO_CODEC_VP8;
+ if (vCoder.initEncoder(mediaInfo.videoCodec)) {
+ printf("Error initializing video encoder\n");
+ }
+ this->initVideoPackager();
+ }
+ if (mediaInfo.hasAudio) {
+
+ printf("Init AUDIO processor\n");
+ mediaInfo.audioCodec.codec = AUDIO_CODEC_PCM_MULAW_8;
+ mediaInfo.audioCodec.sampleRate= 44100;
+ mediaInfo.audioCodec.bitRate = 64000;
+ encodedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
+ packagedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
+ this->initAudioCoder();
+ this->initAudioPackager();
+
+ }
+
+ return 0;
+ }
+
+
+ void OutputProcessor::receiveRawData(RawDataPacket& packet) {
+ int hasFrame = 0;
+ if (packet.type == VIDEO) {
+ printf("Encoding video: size %d\n", packet.length);
+ int a = vCoder.encodeVideo(packet.data, packet.length, encodedBuffer_,UNPACKAGED_BUFFER_SIZE,hasFrame);
+ if (a > 0)
+ this->packageVideo(encodedBuffer_, a, packagedBuffer_);
+ } else {
+// int a = this->encodeAudio(packet.data, packet.length, &pkt);
+// if (a > 0) {
+// printf("Audio encoded, size %d\n", a);
+// }
+
+ }
+// av_free_packet(&pkt);
+ }
+
+ bool OutputProcessor::initAudioCoder() {
+
+ aCoder = avcodec_find_encoder(static_cast<CodecID>(mediaInfo.audioCodec.codec));
+ if (!aCoder) {
+ printf("Audio encoder not found");
+ exit(0);
+ return false;
+ }
+
+ aCoderContext = avcodec_alloc_context3(aCoder);
+ if (!aCoderContext) {
+ printf("Memory error allocating audio encoder context");
+ exit(0);
+ return false;
+ }
+
+ aCoderContext->sample_fmt = AV_SAMPLE_FMT_S16;
+ aCoderContext->bit_rate = mediaInfo.audioCodec.bitRate;
+ aCoderContext->sample_rate = mediaInfo.audioCodec.sampleRate;
+ aCoderContext->channels = 1;
+
+ if (avcodec_open2(aCoderContext, aCoder, NULL) < 0) {
+ printf("Error opening audio encoder");
+ exit(0);
+ return false;
+ }
+
+ audioCoder = 1;
+ return true;
+ }
+
+ bool OutputProcessor::initAudioPackager() {
+ audioPackager = 1;
+ return true;
+ }
+
+ bool OutputProcessor::initVideoPackager() {
+ seqnum_ = 0;
+ videoPackager = 1;
+ return true;
+ }
+
+ int OutputProcessor::packageAudio(unsigned char* inBuff, int inBuffLen,
+ unsigned char* outBuff) {
+
+ if (audioPackager == 0) {
+ printf("RTP audio output packager has not been initialized");
+ return -1;
+ }
+
+
+ timeval time;
+ gettimeofday(&time, NULL);
+ long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
+
+ RTPHeader head;
+ head.setSeqNumber(seqnum_++);
+ head.setTimestamp(millis*8);
+ head.setSSRC(55543);
+ head.setPayloadType(0);
+
+ memcpy (rtpBuffer_, &head, head.getHeaderLength());
+ memcpy(&rtpBuffer_[head.getHeaderLength()], inBuff, inBuffLen);
+ // sink_->sendData(rtpBuffer_, l);
+ // rtpReceiver_->receiveRtpData(rtpBuffer_, (inBuffLen + RTP_HEADER_LEN));
+ return inBuffLen + head.getHeaderLength();
+ }
+
+ int OutputProcessor::packageVideo(unsigned char* inBuff, int buffSize, unsigned char* outBuff) {
+ if (videoPackager == 0) {
+ printf("RTP video output packager has not been initialized");
+ return -1;
+ }
+
+ printf("To packetize %d\n", buffSize);
+ if (buffSize <= 0)
+ return -1;
+ RtpVP8Fragmenter frag(inBuff, buffSize, 1100);
+ bool lastFrame = false;
+ unsigned int outlen = 0;
+ timeval time;
+ gettimeofday(&time, NULL);
+ long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
+ // timestamp_ += 90000 / 
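+ // (RTP video here uses a 90 kHz media clock, hence the wall-clock milliseconds are
+ //  multiplied by 90 when each outgoing packet is stamped below.)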
mediaInfo.videoCodec.frameRate; + + do { + outlen = 0; + frag.getPacket(outBuff, &outlen, &lastFrame); + RTPHeader rtpHeader; + rtpHeader.setMarker(lastFrame?1:0); + rtpHeader.setSeqNumber(seqnum_++); + rtpHeader.setTimestamp(millis*90); + rtpHeader.setSSRC(55543); + rtpHeader.setPayloadType(100); + memcpy(rtpBuffer_, &rtpHeader, rtpHeader.getHeaderLength()); + memcpy(&rtpBuffer_[rtpHeader.getHeaderLength()],outBuff, outlen); + + int l = outlen + rtpHeader.getHeaderLength(); + // sink_->sendData(rtpBuffer_, l); + rtpReceiver_->receiveRtpData(rtpBuffer_, l); + } while (!lastFrame); + + return 0; + } + + int OutputProcessor::encodeAudio(unsigned char* inBuff, int nSamples, + AVPacket* pkt) { + + if (audioCoder == 0) { + printf("No se han inicializado los parámetros del audioCoder"); + return -1; + } + + AVFrame *frame; + /* frame containing input raw audio */ + frame = avcodec_alloc_frame(); + if (!frame) { + fprintf(stderr, "could not allocate audio frame\n"); + exit(1); + } + uint16_t* samples; + int ret, got_output, buffer_size; + float t, tincr; + + frame->nb_samples = aCoderContext->frame_size; + frame->format = aCoderContext->sample_fmt; + // frame->channel_layout = aCoderContext->channel_layout; + + /* the codec gives us the frame size, in samples, + * we calculate the size of the samples buffer in bytes */ + printf("channels %d, frame_size %d, sample_fmt %d\n", + aCoderContext->channels, aCoderContext->frame_size, + aCoderContext->sample_fmt); + buffer_size = av_samples_get_buffer_size(NULL, aCoderContext->channels, + aCoderContext->frame_size, aCoderContext->sample_fmt, 0); + samples = (uint16_t*) av_malloc(buffer_size); + if (!samples) { + fprintf(stderr, "could not allocate %d bytes for samples buffer\n", + buffer_size); + exit(1); + } + /* setup the data pointers in the AVFrame */ + ret = avcodec_fill_audio_frame(frame, aCoderContext->channels, + aCoderContext->sample_fmt, (const uint8_t*) samples, buffer_size, + 0); + if (ret < 0) { + fprintf(stderr, "could not setup audio frame\n"); + exit(1); + } + + ret = avcodec_encode_audio2(aCoderContext, pkt, frame, &got_output); + if (ret < 0) { + fprintf(stderr, "error encoding audio frame\n"); + exit(1); + } + if (got_output) { + //fwrite(pkt.data, 1, pkt.size, f); + printf("Got OUTPUT\n"); + } + + return ret; + + } + +} /* namespace erizo */ diff --git a/erizo/src/erizo/media/MediaProcessor.h b/erizo/src/erizo/media/MediaProcessor.h new file mode 100644 index 0000000000..9d423cfc80 --- /dev/null +++ b/erizo/src/erizo/media/MediaProcessor.h @@ -0,0 +1,208 @@ +#ifndef MEDIAPROCESSOR_H_ +#define MEDIAPROCESSOR_H_ + +#include +#include +#include +#include + +#include "rtp/RtpParser.h" +#include "../MediaDefinitions.h" +#include "codecs/Codecs.h" +#include "codecs/VideoCodec.h" + +extern "C" { +#include +#include + +} + +namespace erizo { + + +struct RTPInfo { + enum CodecID codec; + unsigned int ssrc; + unsigned int PT; +}; + +enum ProcessorType { + RTP_ONLY, AVF +}; + +enum DataType { + VIDEO, AUDIO +}; + +struct RawDataPacket { + unsigned char* data; + int length; + DataType type; +}; + +struct MediaInfo { + std::string url; + bool hasVideo; + bool hasAudio; + ProcessorType proccessorType; + RTPInfo rtpVideoInfo; + RTPInfo rtpAudioInfo; + VideoCodecInfo videoCodec; + AudioCodecInfo audioCodec; + +}; + +#define UNPACKAGED_BUFFER_SIZE 150000 +#define PACKAGED_BUFFER_SIZE 2000 +//class MediaProcessor{ +// MediaProcessor(); +// virtual ~Mediaprocessor(); +//private: +// InputProcessor* input; +// OutputProcessor* output; +//}; + +class 
RawDataReceiver { +public: + virtual void receiveRawData(RawDataPacket& packet) = 0; + virtual ~RawDataReceiver() { + } + ; +}; + +class RTPDataReceiver { +public: + virtual void receiveRtpData(unsigned char* rtpdata, int len) = 0; + virtual ~RTPDataReceiver() { + } + ; +}; + +class RTPSink; + +class InputProcessor: MediaReceiver { +public: + InputProcessor(); + virtual ~InputProcessor(); + + int init(const MediaInfo& info, RawDataReceiver* receiver); + + int receiveAudioData(char* buf, int len); + int receiveVideoData(char* buf, int len); + +private: + + int audioDecoder; + int videoDecoder; + + MediaInfo mediaInfo; + + int audioUnpackager; + int videoUnpackager; + + int gotUnpackagedFrame_; + int upackagedSize_; + + unsigned char* decodedBuffer_; + unsigned char* unpackagedBuffer_; + + unsigned char* decodedAudioBuffer_; + unsigned char* unpackagedAudioBuffer_; + + AVCodec* aDecoder; + AVCodecContext* aDecoderContext; + + + AVFormatContext* aInputFormatContext; + AVInputFormat* aInputFormat; + VideoDecoder vDecoder; + + RTPInfo* vRTPInfo; + + AVFormatContext* vInputFormatContext; + AVInputFormat* vInputFormat; + + RawDataReceiver* rawReceiver_; + + erizo::RtpParser pars; + + bool initAudioDecoder(); + + bool initAudioUnpackager(); + bool initVideoUnpackager(); + + int decodeAudio(unsigned char* inBuff, int inBuffLen, + unsigned char* outBuff); + + int unpackageAudio(unsigned char* inBuff, int inBuffLen, + unsigned char* outBuff); + int unpackageVideo(unsigned char* inBuff, int inBuffLen, + unsigned char* outBuff, int* gotFrame); + +}; +class OutputProcessor: public RawDataReceiver { +public: + + OutputProcessor(); + virtual ~OutputProcessor(); + int init(const MediaInfo& info, RTPDataReceiver* rtpReceiver); + + void receiveRawData(RawDataPacket& packet); + +private: + + int audioCoder; + int videoCoder; + + int audioPackager; + int videoPackager; + + unsigned int seqnum_; + + unsigned long timestamp_; + + unsigned char* encodedBuffer_; + unsigned char* packagedBuffer_; + unsigned char* rtpBuffer_; + + unsigned char* encodedAudioBuffer_; + unsigned char* packagedAudioBuffer_; + unsigned char* rtpAudioBuffer_; + + MediaInfo mediaInfo; + + RTPDataReceiver* rtpReceiver_; + + AVCodec* aCoder; + AVCodecContext* aCoderContext; + + VideoEncoder vCoder; + + + AVFormatContext* aOutputFormatContext; + AVOutputFormat* aOutputFormat; + + RTPInfo* vRTPInfo_; + RTPSink* sink_; + + AVFormatContext* vOutputFormatContext; + AVOutputFormat* vOutputFormat; + + RtpParser pars; + + bool initAudioCoder(); + + bool initAudioPackager(); + bool initVideoPackager(); + + int encodeAudio(unsigned char* inBuff, int nSamples, + AVPacket* pkt); + + int packageAudio(unsigned char* inBuff, int inBuffLen, + unsigned char* outBuff); + + int packageVideo(unsigned char* inBuff, int buffSize, unsigned char* outBuff); +}; +} /* namespace erizo */ + +#endif /* MEDIAPROCESSOR_H_ */ diff --git a/erizo/src/erizo/media/OneToManyTranscoder.cpp b/erizo/src/erizo/media/OneToManyTranscoder.cpp new file mode 100644 index 0000000000..2fd21f0b7e --- /dev/null +++ b/erizo/src/erizo/media/OneToManyTranscoder.cpp @@ -0,0 +1,163 @@ +/* + * OneToManyTranscoder.cpp + */ + +#include "OneToManyTranscoder.h" +#include "../WebRtcConnection.h" +#include "../RTPSink.h" +#include "rtp/RtpHeader.h" + +namespace erizo { +OneToManyTranscoder::OneToManyTranscoder() : + MediaReceiver() { + + sendVideoBuffer_ = (char*) malloc(2000); + sendAudioBuffer_ = (char*) malloc(2000); + + publisher = NULL; + sentPackets_ = 0; + ip = new InputProcessor(); + sink_ = 
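+ // The transcoder pairs an InputProcessor (depacketize and decode the publisher's VP8)
+ // with an OutputProcessor (re-encode and repacketize), so subscribers receive a stream
+ // that is independent of the publisher's encoding parameters.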
new RTPSink("127.0.0.1", "50000"); + MediaInfo m; + m.proccessorType = RTP_ONLY; +// m.videoCodec.bitRate = 2000000; +// printf("m.videoCodec.bitrate %d\n\n", m.videoCodec.bitRate); + m.hasVideo = true; + m.videoCodec.width = 640; + m.videoCodec.height = 480; + m.hasAudio = false; + if (m.hasAudio) { + m.audioCodec.sampleRate = 8000; + m.audioCodec.bitRate = 64000; + + } + printf("init ip\n"); + ip->init(m, this); + + MediaInfo om; + om.proccessorType = RTP_ONLY; + om.videoCodec.bitRate = 2000000; + om.videoCodec.width = 640; + om.videoCodec.height = 480; + om.videoCodec.frameRate = 20; + om.hasVideo = true; +// om.url = "file://tmp/test.mp4"; + + om.hasAudio = false; + if (om.hasAudio) { + om.audioCodec.sampleRate = 8000; + om.audioCodec.bitRate = 64000; + } + + op = new OutputProcessor(); + op->init(om, this); + +} + +OneToManyTranscoder::~OneToManyTranscoder() { + this->closeAll(); + if (sendVideoBuffer_) + delete sendVideoBuffer_; + if (sendAudioBuffer_) + delete sendAudioBuffer_; + if (sink_) { + delete sink_; + } +} + +int OneToManyTranscoder::receiveAudioData(char* buf, int len) { + + if (subscribers.empty() || len <= 0) + return 0; + + std::map::iterator it; + for (it = subscribers.begin(); it != subscribers.end(); it++) { + memset(sendAudioBuffer_, 0, len); + memcpy(sendAudioBuffer_, buf, len); + (*it).second->receiveAudioData(sendAudioBuffer_, len); + } + + return 0; +} + +int OneToManyTranscoder::receiveVideoData(char* buf, int len) { + memset(sendVideoBuffer_, 0, len); + memcpy(sendVideoBuffer_, buf, len); + + RTPHeader* theHead = reinterpret_cast(buf); +// printf("extension %d pt %u\n", theHead->getExtension(), +// theHead->getPayloadType()); + + if (theHead->getPayloadType() == 100) { + ip->receiveVideoData(sendVideoBuffer_, len); + } else { + this->receiveRtpData((unsigned char*) buf, len); + } + +// if (subscribers.empty() || len <= 0) +// return 0; +// if (sentPackets_ % 500 == 0) { +// publisher->sendFirPacket(); +// } +// std::map::iterator it; +// for (it = subscribers.begin(); it != subscribers.end(); it++) { +// memset(sendVideoBuffer_, 0, len); +// memcpy(sendVideoBuffer_, buf, len); +// (*it).second->receiveVideoData(sendVideoBuffer_, len); +// } +// memset(sendVideoBuffer_, 0, len); +// memcpy(sendVideoBuffer_, buf, len); +// sink_->sendData((unsigned char*)sendVideoBuffer_,len); + + sentPackets_++; + return 0; +} + +void OneToManyTranscoder::receiveRawData(RawDataPacket& pkt) { +// printf("Received %d\n", pkt.length); + op->receiveRawData(pkt); +} + +void OneToManyTranscoder::receiveRtpData(unsigned char*rtpdata, int len) { + printf("Received rtp data %d\n", len); + memcpy(sendVideoBuffer_, rtpdata, len); + + if (subscribers.empty() || len <= 0) + return; +// if (sentPackets_ % 500 == 0) { +// publisher->sendFirPacket(); +// } + std::map::iterator it; + for (it = subscribers.begin(); it != subscribers.end(); it++) { + memcpy(sendVideoBuffer_, rtpdata, len); + (*it).second->receiveVideoData(sendVideoBuffer_, len); + } + sentPackets_++; +} + +void OneToManyTranscoder::setPublisher(WebRtcConnection* webRtcConn) { + this->publisher = webRtcConn; +} + +void OneToManyTranscoder::addSubscriber(WebRtcConnection* webRtcConn, + int peerId) { + this->subscribers[peerId] = webRtcConn; +} + +void OneToManyTranscoder::removeSubscriber(int peerId) { + if (this->subscribers.find(peerId) != subscribers.end()) { + this->subscribers[peerId]->close(); + this->subscribers.erase(peerId); + } +} + +void OneToManyTranscoder::closeAll() { + std::map::iterator it; + for (it = 
subscribers.begin(); it != subscribers.end(); it++) { + (*it).second->close(); + } + this->publisher->close(); +} + +}/* namespace erizo */ + diff --git a/erizo/src/erizo/media/OneToManyTranscoder.h b/erizo/src/erizo/media/OneToManyTranscoder.h new file mode 100644 index 0000000000..28a09d2e16 --- /dev/null +++ b/erizo/src/erizo/media/OneToManyTranscoder.h @@ -0,0 +1,74 @@ +/* + * OneToManyTranscoder.h + */ + +#ifndef ONETOMANYTRANSCODER_H_ +#define ONETOMANYTRANSCODER_H_ + +#include +#include + +#include "../MediaDefinitions.h" +#include "MediaProcessor.h" + + +namespace erizo{ +class WebRtcConnection; +class RTPSink; + +/** + * Represents a One to Many connection. + * Receives media from one publisher and retransmits it to every subscriber. + */ +class OneToManyTranscoder : public MediaReceiver, public RawDataReceiver, public RTPDataReceiver { +public: + WebRtcConnection *publisher; + std::map subscribers; + + OneToManyTranscoder(); + virtual ~OneToManyTranscoder(); + /** + * Sets the Publisher + * @param webRtcConn The WebRtcConnection of the Publisher + */ + void setPublisher(WebRtcConnection* webRtcConn); + /** + * Sets the subscriber + * @param webRtcConn The WebRtcConnection of the subscriber + * @param peerId An unique Id for the subscriber + */ + void addSubscriber(WebRtcConnection* webRtcConn, int peerId); + /** + * Eliminates the subscriber given its peer id + * @param peerId the peerId + */ + void removeSubscriber(int peerId); + int receiveAudioData(char* buf, int len); + int receiveVideoData(char* buf, int len); + void receiveRawData(RawDataPacket& packet); + void receiveRtpData(unsigned char*rtpdata, int len); + +// MediaProcessor *mp; + InputProcessor* ip; + OutputProcessor* op; + /** + * Closes all the subscribers and the publisher, the object is useless after this + */ + void closeAll(); + +private: + char* sendVideoBuffer_; + char* sendAudioBuffer_; + char* unpackagedBuffer_; + char* decodedBuffer_; + char* codedBuffer_; + RTPSink* sink_; + std::vector head; + int gotFrame_,gotDecodedFrame_, size_; + void sendHead(WebRtcConnection* conn); + RtpParser pars; + unsigned int sentPackets_; +}; + +} /* namespace erizo */ +#endif /* ONETOMANYTRANSCODER_H_ */ diff --git a/erizo/src/erizo/media/codecs/Codecs.h b/erizo/src/erizo/media/codecs/Codecs.h new file mode 100644 index 0000000000..9bbb3333e9 --- /dev/null +++ b/erizo/src/erizo/media/codecs/Codecs.h @@ -0,0 +1,32 @@ + +#ifndef CODECS_H_ +#define CODECS_H_ + +#include +namespace erizo{ + + enum VideoCodecID{ + VIDEO_CODEC_VP8, + VIDEO_CODEC_H264 + }; + + enum AudioCodecID{ + AUDIO_CODEC_PCM_MULAW_8 + }; + + struct VideoCodecInfo { + VideoCodecID codec; + int payloadType; + int width; + int height; + int bitRate; + int frameRate; + }; + + struct AudioCodecInfo { + AudioCodecID codec; + int bitRate; + int sampleRate; + }; +} +#endif /* CODECS_H_ */ diff --git a/erizo/src/erizo/media/codecs/VideoCodec.cpp b/erizo/src/erizo/media/codecs/VideoCodec.cpp new file mode 100644 index 0000000000..ddbaef9f21 --- /dev/null +++ b/erizo/src/erizo/media/codecs/VideoCodec.cpp @@ -0,0 +1,250 @@ +/** + * VP8Codec.pp + */ + +#include "VideoCodec.h" +#include "Codecs.h" + +#include +#include +extern "C" { +#include +} + +namespace erizo { + inline CodecID + VideoCodecID2ffmpegDecoderID(VideoCodecID codec) + { + switch (codec) + { + case VIDEO_CODEC_H264: return CODEC_ID_H264; + case VIDEO_CODEC_VP8: return CODEC_ID_VP8; + default: printf("Unknown codec\n"); return CODEC_ID_VP8; + } + } + + VideoEncoder::VideoEncoder(){ + avcodec_register_all(); + 
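+ // Typical usage (sketch, values and names are illustrative): fill a VideoCodecInfo and
+ // feed raw I420 frames:
+ //   VideoCodecInfo info; info.codec = VIDEO_CODEC_VP8;
+ //   info.width = 640; info.height = 480; info.bitRate = 1000000; info.frameRate = 20;
+ //   VideoEncoder enc; enc.initEncoder(info);
+ //   int hasFrame = 0;
+ //   int bytes = enc.encodeVideo(yuv, yuvLen, out, outLen, hasFrame);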
} + + int VideoEncoder::initEncoder(const VideoCodecInfo& info){ + vCoder = avcodec_find_encoder(VideoCodecID2ffmpegDecoderID(info.codec)); + if (!vCoder) { + printf("Video codec not found for encoder"); + return -1; + } + + vCoderContext = avcodec_alloc_context3(vCoder); + if (!vCoderContext) { + printf("Error allocating vCoderContext"); + return -2; + } + + vCoderContext->bit_rate = info.bitRate; + vCoderContext->rc_min_rate = info.bitRate; // + vCoderContext->rc_max_rate = info.bitRate; // VPX_CBR + vCoderContext->qmin = 8; + vCoderContext->qmax = 56; // rc_quantifiers + // vCoderContext->frame_skip_threshold = 30; + vCoderContext->rc_buffer_aggressivity = 1; + vCoderContext->rc_buffer_size = vCoderContext->bit_rate; + vCoderContext->rc_initial_buffer_occupancy = vCoderContext->bit_rate / 2; + vCoderContext->width = info.width; + vCoderContext->height = info.height; + vCoderContext->pix_fmt = PIX_FMT_YUV420P; + vCoderContext->time_base = (AVRational) {1, 90000}; + vCoderContext->sample_aspect_ratio = + (AVRational) {info.width,info.height}; + + if (avcodec_open2(vCoderContext, vCoder, NULL) < 0) { + printf("Error opening video decoder"); + return -3; + } + + cPicture = avcodec_alloc_frame(); + if (!cPicture) { + printf("Error allocating video frame"); + return -4; + } + + printf("videoCoder configured successfully %d x %d\n", vCoderContext->width, + vCoderContext->height); + return 0; + } + + int VideoEncoder::encodeVideo (unsigned char* inBuffer, int inLength, unsigned char* outBuffer, int outLength, int& hasFrame){ + + int size = vCoderContext->width * vCoderContext->height; + printf("vCoderContext width %d\n", vCoderContext->width); + + cPicture->pts = AV_NOPTS_VALUE; + cPicture->data[0] = inBuffer; + cPicture->data[1] = inBuffer + size; + cPicture->data[2] = inBuffer + size + size / 4; + cPicture->linesize[0] = vCoderContext->width; + cPicture->linesize[1] = vCoderContext->width / 2; + cPicture->linesize[2] = vCoderContext->width / 2; + + AVPacket pkt; + av_init_packet(&pkt); + pkt.data = outBuffer; + pkt.size = outLength; + + int ret = 0; + int got_packet = 0; + printf( + "Before encoding inBufflen %d, size %d, codecontext width %d pkt->size%d\n", + inLength, size, vCoderContext->width, pkt.size); + ret = avcodec_encode_video2(vCoderContext, &pkt, cPicture, &got_packet); + printf("Encoded video size %u, ret %d, got_packet %d, pts %lld, dts %lld\n", + pkt.size, ret, got_packet, pkt.pts, pkt.dts); + if (!ret && got_packet && vCoderContext->coded_frame) { + vCoderContext->coded_frame->pts = pkt.pts; + vCoderContext->coded_frame->key_frame = + !!(pkt.flags & AV_PKT_FLAG_KEY); + } + return ret ? 
ret : pkt.size; + } + + int VideoEncoder::closeEncoder() { + return 0; + } + + + VideoDecoder::VideoDecoder(){ + avcodec_register_all(); + vDecoder = 0; + vDecoderContext = 0; + } + + int VideoDecoder::initDecoder (const VideoCodecInfo& info){ + printf("Init Decoder\n"); + vDecoder = avcodec_find_decoder(CODEC_ID_VP8); + if (!vDecoder) { + printf("Error getting video decoder\n"); + return -1; + } + + vDecoderContext = avcodec_alloc_context3(vDecoder); + if (!vDecoderContext) { + printf("Error getting allocating decoder context"); + return -1; + } + + vDecoderContext->width = info.width; + vDecoderContext->height = info.height; + + if (avcodec_open2(vDecoderContext, vDecoder, NULL) < 0) { + printf("Error opening video decoder\n"); + return -1; + } + + dPicture = avcodec_alloc_frame(); + if (!dPicture) { + printf("Error allocating video frame\n"); + return -1; + } + + return 0; + } + int VideoDecoder::decodeVideo(unsigned char* inBuff, int inBuffLen, + unsigned char* outBuff, int outBuffLen, int* gotFrame){ + printf("decode video\n"); + if (vDecoder == 0 || vDecoderContext == 0){ + printf("Init Codec First\n"); + return -1; + } + + *gotFrame = false; + + AVPacket avpkt; + av_init_packet(&avpkt); + + avpkt.data = inBuff; + avpkt.size = inBuffLen; + + int got_picture; + int len; + + while (avpkt.size > 0) { + + len = avcodec_decode_video2(vDecoderContext, dPicture, &got_picture, + &avpkt); + + if (len < 0) { + printf("Error decoding video frame\n"); + return -1; + } + + if (got_picture) { + *gotFrame = 1; + goto decoding; + } + avpkt.size -= len; + avpkt.data += len; + } + + if (!got_picture) { + return -1; + } + +decoding: + + int outSize = vDecoderContext->height * vDecoderContext->width; + + if (outBuffLen < (outSize * 3 / 2)) { + return outSize * 3 / 2; + } + + unsigned char *lum = outBuff; + unsigned char *cromU = outBuff + outSize; + unsigned char *cromV = outBuff + outSize + outSize / 4; + + unsigned char *src = NULL; + int src_linesize, dst_linesize; + + src_linesize = dPicture->linesize[0]; + dst_linesize = vDecoderContext->width; + src = dPicture->data[0]; + + for (int i = vDecoderContext->height; i > 0; i--) { + memcpy(lum, src, dst_linesize); + lum += dst_linesize; + src += src_linesize; + } + + src_linesize = dPicture->linesize[1]; + dst_linesize = vDecoderContext->width / 2; + src = dPicture->data[1]; + + for (int i = vDecoderContext->height / 2; i > 0; i--) { + memcpy(cromU, src, dst_linesize); + cromU += dst_linesize; + src += src_linesize; + } + + src_linesize = dPicture->linesize[2]; + dst_linesize = vDecoderContext->width / 2; + src = dPicture->data[2]; + + for (int i = vDecoderContext->height / 2; i > 0; i--) { + memcpy(cromV, src, dst_linesize); + cromV += dst_linesize; + src += src_linesize; + } + av_free_packet(&avpkt); + + return outSize * 3 / 2; + } + + int VideoDecoder::closeDecoder(){ + if (dPicture!=0) + av_free(dPicture); + if (vDecoderContext!=0){ + avcodec_close(vDecoderContext); + av_free(vDecoderContext); + } + return 0; + } + +} diff --git a/erizo/src/erizo/media/codecs/VideoCodec.h b/erizo/src/erizo/media/codecs/VideoCodec.h new file mode 100644 index 0000000000..8d4067d704 --- /dev/null +++ b/erizo/src/erizo/media/codecs/VideoCodec.h @@ -0,0 +1,46 @@ +/** + * VideoCodec.h + */ + +#ifndef VIDEOCODEC_H_ +#define VIDEOCODEC_H_ + +#include "Codecs.h" +//Forward Declarations + +struct AVCodec; +struct AVCodecContext; +struct AVFrame; + +namespace erizo { + + class VideoEncoder { + public: + VideoEncoder(); + int initEncoder (const VideoCodecInfo& info); + int 
encodeVideo (unsigned char* inBuffer, int length, + unsigned char* outBuffer, int outLength, int& hasFrame); + int closeEncoder (); + + private: + AVCodec* vCoder; + AVCodecContext* vCoderContext; + AVFrame* cPicture; + }; + + class VideoDecoder { + public: + VideoDecoder(); + int initDecoder (const VideoCodecInfo& info); + int decodeVideo(unsigned char* inBuff, int inBuffLen, + unsigned char* outBuff, int outBuffLen, int* gotFrame); + int closeDecoder(); + + private: + AVCodec* vDecoder; + AVCodecContext* vDecoderContext; + AVFrame* dPicture; + }; + +} +#endif /* VIDEOCODEC_H_ */ diff --git a/erizo/src/erizo/media/mixers/VideoMixer.cpp b/erizo/src/erizo/media/mixers/VideoMixer.cpp new file mode 100644 index 0000000000..815438b2ef --- /dev/null +++ b/erizo/src/erizo/media/mixers/VideoMixer.cpp @@ -0,0 +1,80 @@ +/* + * VideoMixer.cpp + */ + +#include "VideoMixer.h" +#include "VideoUtils.h" +#include "../../RTPSink.h" +#include "../../WebRtcConnection.h" + +namespace erizo { + VideoMixer::VideoMixer() : + MediaReceiver() { + + sendVideoBuffer_ = (char*) malloc(2000); + sendAudioBuffer_ = (char*) malloc(2000); + + subscriber = NULL; + sentPackets_ = 0; + ip = new InputProcessor(); + sink_ = new RTPSink("127.0.0.1", "50000"); + MediaInfo m; + m.proccessorType = RTP_ONLY; + // m.videoCodec.bitRate = 2000000; + // printf("m.videoCodec.bitrate %d\n\n", m.videoCodec.bitRate); + m.hasVideo = true; + m.videoCodec.width = 640; + m.videoCodec.height = 480; + ip->init(m, this); + + MediaInfo om; + om.proccessorType = RTP_ONLY; + om.videoCodec.bitRate = 2000000; + om.videoCodec.width = 640; + om.videoCodec.height = 480; + om.videoCodec.frameRate = 20; + om.hasVideo = true; + op = new OutputProcessor(); + op->init(om, this); + + } + + VideoMixer::~VideoMixer() { + + if (sendVideoBuffer_) + delete sendVideoBuffer_; + if (sendAudioBuffer_) + delete sendAudioBuffer_; + if (sink_) { + delete sink_; + } + } + + int VideoMixer::receiveAudioData(char* buf, int len) { + + } + + int VideoMixer::receiveVideoData(char* buf, int len) { + + } + + void VideoMixer::receiveRawData(RawDataPacket& pkt) { + } + + void VideoMixer::receiveRtpData(unsigned char* rtpdata, int len){ + } + + void VideoMixer::addPublisher(WebRtcConnection* webRtcConn, int peerSSRC){ + } + + void VideoMixer::setSubscriber(WebRtcConnection* webRtcConn){ + } + + void VideoMixer::removePublisher(int peerSSRC) { + } + + void VideoMixer::closeAll() { + } + +}/* namespace erizo */ + diff --git a/erizo/src/erizo/media/mixers/VideoMixer.h b/erizo/src/erizo/media/mixers/VideoMixer.h new file mode 100644 index 0000000000..b098b08412 --- /dev/null +++ b/erizo/src/erizo/media/mixers/VideoMixer.h @@ -0,0 +1,75 @@ + +/* + * VideoMixer.h + */ + +#ifndef VIDEOMIXER_H_ +#define VIDEOMIXER_H_ + +#include +#include + +#include "../../MediaDefinitions.h" +#include "../MediaProcessor.h" + + +namespace erizo{ +class WebRtcConnection; +class RTPSink; + +/** + * Represents a One to Many connection. + * Receives media from one publisher and retransmits it to every subscriber. 
+ */ +class VideoMixer : public MediaReceiver, public RawDataReceiver, public RTPDataReceiver { +public: + WebRtcConnection *subscriber; + std::map publishers; + + VideoMixer(); + virtual ~VideoMixer(); + /** + * Sets the Publisher + * @param webRtcConn The WebRtcConnection of the Publisher + */ + void addPublisher(WebRtcConnection* webRtcConn, int peerSSRC); + /** + * Sets the subscriber + * @param webRtcConn The WebRtcConnection of the subscriber + * @param peerId An unique Id for the subscriber + */ + void setSubscriber(WebRtcConnection* webRtcConn); + /** + * Eliminates the subscriber given its peer id + * @param peerId the peerId + */ + void removePublisher(int peerSSRC); + int receiveAudioData(char* buf, int len); + int receiveVideoData(char* buf, int len); + void receiveRawData(RawDataPacket& packet); + void receiveRtpData(unsigned char* rtpdata, int len); + +// MediaProcessor *mp; + InputProcessor* ip; + OutputProcessor* op; + /** + * Closes all the subscribers and the publisher, the object is useless after this + */ + void closeAll(); + +private: + char* sendVideoBuffer_; + char* sendAudioBuffer_; + char* unpackagedBuffer_; + char* decodedBuffer_; + char* codedBuffer_; + RTPSink* sink_; + std::vector head; + int gotFrame_,gotDecodedFrame_, size_; + void sendHead(WebRtcConnection* conn); + RtpParser pars; + unsigned int sentPackets_; +}; + +} /* namespace erizo */ +#endif /* VIDEOMIXER_H_ */ diff --git a/erizo/src/erizo/media/mixers/VideoUtils.cpp b/erizo/src/erizo/media/mixers/VideoUtils.cpp new file mode 100644 index 0000000000..365946bca7 --- /dev/null +++ b/erizo/src/erizo/media/mixers/VideoUtils.cpp @@ -0,0 +1,647 @@ +/** + * VideoUtils.cpp + */ +#include +#include + +#include "VideoUtils.h" + +// +// MIN macro +// +#define MIN(a,b) (a(format)) + { + case I420P_FORMAT: + if (outBuffLen < outW*outH*3/2) + { + printf("vRescale:: needed %d, outBuffLen = %d\n", + outW*outH*3/2, + outBuffLen + ); + return -1; + } + + //rescale luminance + vRescaleP(inBuff, + inW*inH, + outBuff, + outW*outH, + inW, + inH, + outW, + outH, + 1 // Bytes Per Pixel + ); + + //rescale chroma U + vRescaleP(inBuff+inW*inH, + inW*inH*4, + outBuff+outW*outH, + outW*outH*4, + inW/2, + inH/2, + outW/2, + outH/2, + 1 // Bytes Per Pixel + ); + + //rescale chroma V + vRescaleP(inBuff+inW*inH*5/4, + inW*inH*4, + outBuff+outW*outH*5/4, + outW*outH*4, + inW/2, + inH/2, + outW/2, + outH/2, + 1 // Bytes Per Pixel + ); + + return outW*outH*3/2; + + case RGB24_FORMAT: + case BGR24_FORMAT: + if (outBuffLen < outW*outH*3) + { + printf("vRescale:: needed %d, outBuffLen = %d\n", + outW*outH*3, + outBuffLen + ); + return -1; + } + + //rescale rgb plane + vRescaleP(inBuff, + inW*inH, + outBuff, + outW*outH, + inW, + inH, + outW, + outH, + 3 // Bytes Per Pixel + ); + + return outW*outH*3; + + default: + fprintf(stderr, "vRescale: not implemented for %d\n", format); + abort(); + } + return -1; +} + +inline void +vPutImageP(unsigned char *inBuff, + unsigned int inBuffLen, + unsigned char *outBuff, + unsigned int W, + unsigned int H, + unsigned int X, + unsigned int Y, + unsigned int totalW, + unsigned int totalH, + unsigned int BPP, + unsigned char *mask, + bool invert + ) +{ + unsigned lineSize1 = W*BPP; + unsigned lineSize2 = totalW*BPP; + unsigned initRectPos1 = 0; + unsigned initRectPos2 = lineSize2*Y + X*BPP; + unsigned position1 = 0; + unsigned position2 = 0; + + if (mask) + { + for (unsigned i = 0; i < H; i++) + { + position1 = initRectPos1 + lineSize1*i; //save image1 position + position2 = initRectPos2 + lineSize2*i; 
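+ // (A non-null mask selects which destination bytes are overwritten; the invert flag
+ //  flips that selection.)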
//save image2 position + for (unsigned j = 0; j < lineSize1; j++) + { + if (((bool)mask[position2+j])^invert) + { + outBuff[position2+j] = inBuff[position1+j]; //copy pixel + } + } + } + } + else + { + for (unsigned i = 0; i < H; i++) + { + position1 = initRectPos1 + lineSize1*i; //save image1 position + position2 = initRectPos2 + lineSize2*i; //save image2 position + memcpy(&outBuff[position2],&inBuff[position1],lineSize1); //copy line + } + } +} + +int +VideoUtils::vPutImage(unsigned char *inBuff, + unsigned int inBuffLen, + unsigned char *outBuff, + unsigned int outBuffLen, + unsigned int inW, + unsigned int inH, + unsigned int outW, + unsigned int outH, + unsigned int posX, + unsigned int posY, + unsigned int totalW, + unsigned int totalH, + uint32_t format, + unsigned char *mask, + bool invert + ) +{ + if ((outW > totalW) || (outH > totalH)) + { + printf("vPutImage : output resolution greater" + " than total image resolution!\n" + ); + return -1; + } + + if (posX + outW>totalW) outW = totalW-posX; + if (posY + outH>totalH) outH = totalH-posY; + + double factor = 0; + int BPP = 0; + switch (format) + { + case I420P_FORMAT: + factor = 1.5; + BPP = 1; + break; + case RGB24_FORMAT: + case BGR24_FORMAT: + factor = 3; + BPP = 3; + break; + default: + printf("vPutImage : unknown format %d\n", format); + abort(); + } + + if (outBuffLen < outW*outH*BPP*factor) + { + printf("vPutImage :: needed %f, outBuffLen = %d\n", + totalW*totalH*BPP*factor, + outBuffLen + ); + return -1; + } + + unsigned char * image = inBuff; + + int len = inBuffLen; + if ((inW != outW) || + (inH != outH)) + { + len = int(outW*outH*factor); + image = new unsigned char[len]; + int ret = vRescale (inBuff, + inBuffLen, + image, + len, + inW, + inH, + outW, + outH, + format + ); + + if (ret<=0) + { + printf("vPutImage : vRescale failed\n"); + delete [] image; + return -1; + } + } + + switch (format) + { + case I420P_FORMAT: + + //put luminance plane + vPutImageP(image, + outW*outH*BPP, + outBuff, + outW, + outH, + posX, + posY, + totalW, + totalH, + BPP, + mask, + invert + ); + + //put chroma U plane + vPutImageP(image+outW*outH*BPP, + outW*outH*BPP/4, + outBuff+totalW*totalH*BPP, + outW/2, + outH/2, + posX/2, + posY/2, + totalW/2, + totalH/2, + BPP, + mask?mask+totalW*totalH*BPP:NULL, + invert + ); + + //put chroma V plane + vPutImageP(image+outW*outH*BPP*5/4, + outW*outH*BPP/4, + outBuff+totalW*totalH*BPP*5/4, + outW/2, + outH/2, + posX/2, + posY/2, + totalW/2, + totalH/2, + BPP, + mask?mask+totalW*totalH*BPP*5/4:NULL, + invert + ); + break; + + case RGB24_FORMAT: + case BGR24_FORMAT: + + //put bgr plane + vPutImageP(image, + outW*outH*BPP, + outBuff, + outW, + outH, + posX, + posY, + totalW, + totalH, + BPP, + mask, + invert + ); + break; + + default: + printf("vPutImage : unknown format\n"); + abort(); + } + + if (image != inBuff) + { + delete[] image; + } + + return int(totalW*totalH*BPP*factor); +} + +inline void +vSetMaskRectP(unsigned char *mask, + unsigned int W, + unsigned int H, + unsigned int posX, + unsigned int posY, + unsigned int totalW, + unsigned int totalH, + bool val, + int BPP + ) +{ + unsigned lineSize1 = W*BPP; + unsigned lineSize2 = totalW*BPP; + unsigned initRectPos1 = 0; + unsigned initRectPos2 = lineSize2*posY + posX*BPP; + unsigned position1 = 0; + unsigned position2 = 0; + for(unsigned i = 0; i < H; i++) + { + position1 = initRectPos1 + lineSize1*i; //save image1 position + position2 = initRectPos2 + lineSize2*i; //save image2 position + for (unsigned j = 0; j < lineSize1; j+=BPP) + { + for (int k 
= 0;k + +class VideoUtils{ + + enum ImgFormat{ + I420P_FORMAT, + RGB24_FORMAT, + BGR24_FORMAT + }; + + static int + vRescale(unsigned char *inBuff, + unsigned int inBuffLen, + unsigned char *outBuff, + unsigned int outBuffLen, + unsigned int inW, + unsigned int inH, + unsigned int outW, + unsigned int outH, + uint32_t format + ); + + static int + vPutImage(unsigned char *inBuff, + unsigned int inBuffLen, + unsigned char *outBuff, + unsigned int outBuffLen, + unsigned int inW, + unsigned int inH, + unsigned int outW, + unsigned int outH, + unsigned int posX, + unsigned int posY, + unsigned int totalW, + unsigned int totalH, + uint32_t format, + unsigned char *mask = NULL, + bool invertMask = false + ); + + static void + vSetMaskRect(unsigned char *mask, + unsigned int W, + unsigned int H, + unsigned int posX, + unsigned int posY, + unsigned int totalW, + unsigned int totalH, + bool val, + uint32_t format + ); + + static int + vSetMask(unsigned char *outBuff, + unsigned outBuffLen, + unsigned char *mask, + unsigned W, + unsigned H, + unsigned totalW, + unsigned totalH, + bool val, + uint32_t format + ); +}; + +#endif //_VIDEOUTILS_H_ + diff --git a/erizo/src/erizo/media/rtp/RtpHeader.h b/erizo/src/erizo/media/rtp/RtpHeader.h new file mode 100644 index 0000000000..63b36d53b2 --- /dev/null +++ b/erizo/src/erizo/media/rtp/RtpHeader.h @@ -0,0 +1,187 @@ +/* + * RtpHeader.h + * + * Created on: Sep 20, 2012 + * Author: pedro + */ + +#ifndef RTPHEADER_H_ +#define RTPHEADER_H_ + +class RTPHeader { +public: + // constants + + /** + * KSize + * Longitud de la cabecera en bytes. + */ + static const int MIN_SIZE = 12; + +public: + // Constructor + inline RTPHeader() : + cc(0), extension(0), padding(0), version(2), payloadtype(0), marker( + 0), seqnum(0), timestamp(0), ssrc(0), extId(0), extLength(0) { + // No implementation required + } + +public: + // Member functions + + /** + * Get the marker bit from the RTP header. + * @return 1 if marker bit is set 0 if is not set. + */ + inline uint8_t getMarker() const { + return marker; + } + + /** + * Set the marker bit from the RTP header. + * @param aMarker 1 to set marker bit, 0 to unset it. + */ + inline void setMarker(uint8_t aMarker) { + marker = aMarker; + } + + /** + * Get the extension bit from the RTP header. + * @return 1 if extension bit is set 0 if is not set. + */ + inline uint8_t getExtension() const { + return extension; + } + + /** + * Set the extension bit from the RTP header + * @param ext 1 to set extension bit, 0 to unset i + */ + inline void setExtension(uint8_t ext) { + extension = ext; + } + + /** + * Get the payload type from the RTP header. + * @return A TInt8 holding the value. + */ + inline uint8_t getPayloadType() const { + return payloadtype; + } + + /** + * Set the payload type from the RTP header. + * @param aType the payload type. Valid range between 0x00 to 0x7F + */ + inline void setPayloadType(uint8_t aType) { + payloadtype = aType; + } + + /** + * Get the sequence number field from the RTP header. + * @return A TInt16 holding the value. + */ + inline uint16_t getSeqNumber() const { + return ntohs(seqnum); + } + + /** + * Set the seq number from the RTP header. + * @param aSeqNumber The seq number. Valid range between 0x0000 to 0xFFFF + */ + inline void setSeqNumber(uint16_t aSeqNumber) { + seqnum = htons(aSeqNumber); + } + + /** + * Get the Timestamp field from the RTP header. + * @return A TInt32 holding the value. 
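+ *
+ * Example (sketch, variable names are illustrative): the setters store multi-byte fields
+ * in network byte order, so a header can be copied straight into a packet buffer, as
+ * OutputProcessor::packageVideo() does:
+ *   RTPHeader h; h.setSeqNumber(seq); h.setTimestamp(ts); h.setSSRC(55543); h.setPayloadType(100);
+ *   memcpy(packet, &h, h.getHeaderLength());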
+ */ + inline uint32_t getTimestamp() const { + return ntohl(timestamp); + } + + /** + * Set the Timestamp from the RTP header. + * @param aTimestamp The Timestamp. Valid range between 0x00000000 to 0xFFFFFFFF + */ + inline void setTimestamp(uint32_t aTimestamp) { + timestamp = htonl(aTimestamp); + } + + /** + * Get the SSRC field from the RTP header. + * @return A TInt32 holding the value. + */ + inline uint32_t getSSRC() const { + return ntohl(ssrc); + } + + /** + * Set the SSRC from the RTP header. + * @param aSSRC The SSRC. Valid range between 0x00000000 to 0xFFFFFFFF + */ + inline void setSSRC(uint32_t aSSRC) { + ssrc = htonl(aSSRC); + } + /** + * Get the extension ID field from the RTP header. + * @return A TInt16 holding the value. + */ + inline uint16_t getExtId() const { + return ntohs(extId); + } + + /** + * Set the extension ID in the RTP header. + * @param extensionId The extension ID. Valid range between 0x0000 to 0xFFFF + */ + inline void setExtId(uint16_t extensionId) { + extId = htons(extensionId); + } + + /** + * Get the extension length field from the RTP header. + * @return A TInt16 holding the value. + */ + inline uint16_t getExtLength() const { + return ntohs(extLength); + } + + /** + * Set the extension length in the RTP header. + * @param extensionLength The extension length. Valid range between 0x0000 to 0xFFFF + */ + inline void setExtLength(uint16_t extensionLength) { + extLength = htons(extensionLength); + } + + /** + * Get the RTP header length + * @return the length in bytes + */ + inline int getHeaderLength() { + return MIN_SIZE + cc * 4 + extension * (4 + extLength * 4); + } + + + +private: + // Data + + uint32_t cc :4; + uint32_t extension :1; + uint32_t padding :1; + uint32_t version :2; + uint32_t payloadtype :7; + uint32_t marker :1; + uint32_t seqnum :16; + uint32_t timestamp; + uint32_t ssrc; + uint32_t csrc[3]; + uint32_t extId :16; + uint32_t extLength :16; + +}; + +#endif /* RTPHEADER_H_ */ diff --git a/erizo/src/erizo/media/rtp/RtpParser.cpp b/erizo/src/erizo/media/rtp/RtpParser.cpp new file mode 100644 index 0000000000..1e6dc967d4 --- /dev/null +++ b/erizo/src/erizo/media/rtp/RtpParser.cpp @@ -0,0 +1,211 @@ +/* + * This file contains third party code: copyright below + */ + +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include +#include + +#include "RtpParser.h" + +namespace erizo { + +RtpParser::RtpParser() { +} + +RtpParser::~RtpParser() { + +} + +// +// VP8 format: +// +// Payload descriptor +// 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+ +// |X|R|N|S|PartID | (REQUIRED) +// +-+-+-+-+-+-+-+-+ +// X: |I|L|T|K| RSV | (OPTIONAL) +// +-+-+-+-+-+-+-+-+ +// I: | PictureID | (OPTIONAL) +// +-+-+-+-+-+-+-+-+ +// L: | TL0PICIDX | (OPTIONAL) +// +-+-+-+-+-+-+-+-+ +// T/K: |TID:Y| KEYIDX | (OPTIONAL) +// +-+-+-+-+-+-+-+-+ +// +// Payload header (considered part of the actual payload, sent to decoder) +// 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+ +// |Size0|H| VER |P| +// +-+-+-+-+-+-+-+-+ +// | ...
| +// + + +int ParseVP8PictureID(erizo::RTPPayloadVP8* vp8, const unsigned char** dataPtr, + int* dataLength, int* parsedBytes) { + if (*dataLength <= 0) + return -1; + vp8->pictureID = (**dataPtr & 0x7F); + if (**dataPtr & 0x80) { + (*dataPtr)++; + (*parsedBytes)++; + if (--(*dataLength) <= 0) + return -1; + // PictureID is 15 bits + vp8->pictureID = (vp8->pictureID << 8) + **dataPtr; + } + (*dataPtr)++; + (*parsedBytes)++; + (*dataLength)--; + return 0; +} + +int ParseVP8Tl0PicIdx(erizo::RTPPayloadVP8* vp8, const unsigned char** dataPtr, + int* dataLength, int* parsedBytes) { + if (*dataLength <= 0) + return -1; + vp8->tl0PicIdx = **dataPtr; + (*dataPtr)++; + (*parsedBytes)++; + (*dataLength)--; + return 0; +} + +int ParseVP8TIDAndKeyIdx(erizo::RTPPayloadVP8* vp8, + const unsigned char** dataPtr, int* dataLength, int* parsedBytes) { + if (*dataLength <= 0) + return -1; + if (vp8->hasTID) { + vp8->tID = ((**dataPtr >> 6) & 0x03); + vp8->layerSync = (**dataPtr & 0x20) ? true : false; // Y bit + } + if (vp8->hasKeyIdx) { + vp8->keyIdx = (**dataPtr & 0x1F); + } + (*dataPtr)++; + (*parsedBytes)++; + (*dataLength)--; + return 0; +} + +//int ParseVP8FrameSize(RTPPayload& parsedPacket, +// const unsigned char* dataPtr, +// int dataLength) { +// if (parsedPacket.frameType != kIFrame) { +// // Included in payload header for I-frames. +// return 0; +// } +// if (dataLength < 10) { +// // For an I-frame we should always have the uncompressed VP8 header +// // in the beginning of the partition. +// return -1; +// } +// RTPPayloadVP8* vp8 = &parsedPacket.info.VP8; +// vp8->frameWidth = ((dataPtr[7] << 8) + dataPtr[6]) & 0x3FFF; +// vp8->frameHeight = ((dataPtr[9] << 8) + dataPtr[8]) & 0x3FFF; +// return 0; +//} + +int ParseVP8Extension(erizo::RTPPayloadVP8* vp8, const unsigned char* dataPtr, int dataLength) { + + int parsedBytes = 0; + if (dataLength <= 0) + return -1; + // Optional X field is present + vp8->hasPictureID = (*dataPtr & 0x80) ? true : false; // I bit + vp8->hasTl0PicIdx = (*dataPtr & 0x40) ? true : false; // L bit + vp8->hasTID = (*dataPtr & 0x20) ? true : false; // T bit + vp8->hasKeyIdx = (*dataPtr & 0x10) ? true : false; // K bit + + printf("Parsing extension haspic %d, hastl0 %d, has TID %d, has Key %d \n",vp8->hasPictureID,vp8->hasTl0PicIdx, vp8->hasTID, vp8->hasKeyIdx ); + + // Advance dataPtr and decrease remaining payload size + dataPtr++; + parsedBytes++; + dataLength--; + + if (vp8->hasPictureID) { + if (ParseVP8PictureID(vp8, &dataPtr, &dataLength, &parsedBytes) != 0) { + return -1; + } + } + + if (vp8->hasTl0PicIdx) { + if (ParseVP8Tl0PicIdx(vp8, &dataPtr, &dataLength, &parsedBytes) != 0) { + return -1; + } + } + + if (vp8->hasTID || vp8->hasKeyIdx) { + if (ParseVP8TIDAndKeyIdx(vp8, &dataPtr, &dataLength, &parsedBytes) + != 0) { + return -1; + } + } + return parsedBytes; +} + +RTPPayloadVP8* RtpParser::parseVP8(unsigned char* data, + int dataLength) { + RTPPayloadVP8* vp8 = new RTPPayloadVP8; // = &parsedPacket.info.VP8; + const unsigned char* dataPtr = data; + + // Parse mandatory first byte of payload descriptor + bool extension = (*dataPtr & 0x80) ? true : false; // X bit + vp8->nonReferenceFrame = (*dataPtr & 0x20) ? true : false; // N bit + vp8->beginningOfPartition = (*dataPtr & 0x10) ? 
true : false; // S bit + vp8->partitionID = (*dataPtr & 0x0F); // PartID field + + printf("X: %d N %d S %d PartID %d\n", extension, vp8->nonReferenceFrame, vp8->beginningOfPartition, vp8->partitionID); + + if (vp8->partitionID > 8) { + // Weak check for corrupt data: PartID MUST NOT be larger than 8. + return vp8; + } + + // Advance dataPtr and decrease remaining payload size + dataPtr++; + dataLength--; + + if (extension) { + const int parsedBytes = ParseVP8Extension(vp8, dataPtr, dataLength); + if (parsedBytes < 0) + return vp8; + dataPtr += parsedBytes; + dataLength -= parsedBytes; + } + + if (dataLength <= 0) { + printf("Error parsing VP8 payload descriptor; payload too short\n"); + return vp8; + } + + std::string frametype; + // Read P bit from payload header (only at beginning of first partition) + if (dataLength > 0 && vp8->beginningOfPartition && vp8->partitionID == 0) { + //parsedPacket.frameType = (*dataPtr & 0x01) ? kPFrame : kIFrame; + frametype = (*dataPtr & 0x01) ? "kPFrame" : "kIFrame"; + } else { + + frametype = "kPFrame"; + } +// if (0 != ParseVP8FrameSize(parsedPacket, dataPtr, dataLength)) { +// return *vp8; +// } + vp8->data = dataPtr; + vp8->dataLength = (unsigned int) dataLength; + + return vp8; +} +} + diff --git a/erizo/src/erizo/media/rtp/RtpParser.h b/erizo/src/erizo/media/rtp/RtpParser.h new file mode 100644 index 0000000000..640e7eb267 --- /dev/null +++ b/erizo/src/erizo/media/rtp/RtpParser.h @@ -0,0 +1,40 @@ +#ifndef RTPUTILS_H_ +#define RTPUTILS_H_ + +namespace erizo { + +typedef struct { + bool nonReferenceFrame; + bool beginningOfPartition; + int partitionID; + bool hasPictureID; + bool hasTl0PicIdx; + bool hasTID; + bool hasKeyIdx; + int pictureID; + int tl0PicIdx; + int tID; + bool layerSync; + int keyIdx; + int frameWidth; + int frameHeight; + + const unsigned char* data; + unsigned int dataLength; +} RTPPayloadVP8; + +enum FrameTypes { + kIFrame, // key frame + kPFrame // Delta frame +}; + +class RtpParser { + +public: + RtpParser(); + virtual ~RtpParser(); + erizo::RTPPayloadVP8* parseVP8(unsigned char* data, int datalength); +}; + +} /* namespace erizo */ +#endif /* RTPUTILS_H_ */ diff --git a/erizo/src/erizo/media/rtp/RtpVP8Fragmenter.cpp b/erizo/src/erizo/media/rtp/RtpVP8Fragmenter.cpp new file mode 100644 index 0000000000..ce047fa951 --- /dev/null +++ b/erizo/src/erizo/media/rtp/RtpVP8Fragmenter.cpp @@ -0,0 +1,63 @@ +#include +#include +#include "RtpVP8Fragmenter.h" + +#define MAX_SIZE 1100 //max fragment size including vp8 payload descriptor +#define VP8 1 +namespace erizo { + +RtpVP8Fragmenter::RtpVP8Fragmenter(unsigned char* data, unsigned int length, + unsigned int maxLength) : + totalData_(data), totalLenth_(length), maxlength_(maxLength) { + calculatePackets(); +} + +RtpVP8Fragmenter::~RtpVP8Fragmenter() { +} + +int RtpVP8Fragmenter::getPacket(unsigned char* data, unsigned int* length, + bool* lastPacket) { + if (fragmentQueue_.size() > 0) { + const Fragment& test = fragmentQueue_.front(); + + *length = writeFragment(test, data, length); + fragmentQueue_.pop(); + if (fragmentQueue_.empty()) + *lastPacket = true; + } + return 0; +} +void RtpVP8Fragmenter::calculatePackets() { + unsigned int remaining = totalLenth_; + unsigned int currentPos = 0; + while (remaining > 0) { +// printf("Packetizing, remaining %u\n", remaining); + Fragment newFragment; + newFragment.first = false; + newFragment.position = currentPos; + if (currentPos == 0) + newFragment.first = true; + newFragment.size = remaining > MAX_SIZE - 1 ? 
MAX_SIZE - 1 : remaining; +// printf("New fragment size %u, position %u\n", newFragment.size, +// newFragment.position); + currentPos += newFragment.size; + remaining -= newFragment.size; + fragmentQueue_.push(newFragment); + } +} +unsigned int RtpVP8Fragmenter::writeFragment(const Fragment& fragment, + unsigned char* buffer, unsigned int* length) { + + if (VP8) { + buffer[0] = 0x0; + if (fragment.first) + buffer[0] |= 0x10; // S bit 1 // era 01 + memcpy(&buffer[1], &totalData_[fragment.position], fragment.size); + return (fragment.size + 1); + } else { + memcpy(&buffer[0], &totalData_[fragment.position], fragment.size); + return fragment.size; + } +} + +} /* namespace erizo */ diff --git a/erizo/src/erizo/media/rtp/RtpVP8Fragmenter.h b/erizo/src/erizo/media/rtp/RtpVP8Fragmenter.h new file mode 100644 index 0000000000..620971d0fe --- /dev/null +++ b/erizo/src/erizo/media/rtp/RtpVP8Fragmenter.h @@ -0,0 +1,31 @@ +#ifndef RTPFRAGMENTER_H_ +#define RTPFRAGMENTER_H_ + +#include + +namespace erizo { + +class RtpVP8Fragmenter { +public: + RtpVP8Fragmenter(unsigned char* data, unsigned int length, unsigned int maxLength); + virtual ~RtpVP8Fragmenter(); + + int getPacket(unsigned char* data, unsigned int* length, bool* lastPacket); + +private: + struct Fragment { + unsigned int position; + unsigned int size; + bool first; + }; + void calculatePackets(); + unsigned int writeFragment(const Fragment& fragment, unsigned char* buffer, + unsigned int* length); + unsigned char* totalData_; + unsigned int totalLenth_; + unsigned int maxlength_; + std::queue fragmentQueue_; +}; + +} /* namespace erizo */ +#endif /* RTPFRAGMENTER_H_ */ diff --git a/erizo/src/examples/CMakeLists.txt b/erizo/src/examples/CMakeLists.txt new file mode 100644 index 0000000000..ec78267b10 --- /dev/null +++ b/erizo/src/examples/CMakeLists.txt @@ -0,0 +1,19 @@ +cmake_minimum_required(VERSION 2.8) +#functions +function(test_lib LIB_NAME) + if (${LIB_NAME} MATCHES "^.*-NOTFOUND") + message(FATAL_ERROR "lib not found: " ${LIB_NAME} " check README") + return() + endif(${LIB_NAME} MATCHES "^.*-NOTFOUND") +endfunction(test_lib) + +project (ERIZO_EXAMPLES) +file(GLOB_RECURSE ERIZO_EXAMPLES_SOURCES ${ERIZO_EXAMPLES_SOURCE_DIR}/*.cpp ${ERIZO_EXAMPLES_SOURCE_DIR}/*.h) +add_executable(hsam ${ERIZO_EXAMPLES_SOURCES}) +include_directories(${ERIZO_EXAMPLES_SOURCE_DIR}/../erizo) +link_directories(${ERIZO_EXAMPLES_SOURCE_DIR}/../../build) + +set (BOOST_LIBS thread regex system) +find_package(Boost COMPONENTS ${BOOST_LIBS} REQUIRED) +target_link_libraries(hsam ${Boost_LIBRARIES}) +target_link_libraries(hsam ${EXTRA_LIBS}) diff --git a/erizo/src/examples/Test.cpp b/erizo/src/examples/Test.cpp new file mode 100644 index 0000000000..dd2f790a64 --- /dev/null +++ b/erizo/src/examples/Test.cpp @@ -0,0 +1,162 @@ +#include +#include + +#include +#include + +#include "Test.h" + +using boost::asio::ip::udp; + +namespace erizo { +Test::Test() { + ip = new InputProcessor(); + MediaInfo m; + ip->init(m, this); +// mp = new MediaProcessor(); +// +//// audioCodecInfo *i = new audioCodecInfo; +//// i->codec = CODEC_ID_AMR_NB; +//// i->bitRate = 300000; +//// i->sampleRate = 44100; +//// mp->initAudioDecoder(i); +// +//// RTPInfo *r = new RTPInfo; +//// r->codec = CODEC_ID_MP3; +//// mp->initAudioUnpackagerRTP(r); +// +// videoCodecInfo *v = new videoCodecInfo; +// v->codec = CODEC_ID_VP8; +//// v->codec = CODEC_ID_MPEG4; +// //v->width = 706; +// //v->height = 396; +// v->width = 400; +// v->height = 300; +// +// mp->initVideoDecoder(v); +// +// videoCodecInfo *c = 
new videoCodecInfo; +// //c->codec = CODEC_ID_MPEG2VIDEO; +// c->codec = CODEC_ID_MPEG2VIDEO; +// c->width = v->width; +// c->height = v->height; +// c->frameRate = 24; +// c->bitRate = 1024; +// c->maxInter = 0; +// +// mp->initVideoCoder(c); +// +// RTPInfo *r = new RTPInfo; +// //r->codec = CODEC_ID_MPEG2VIDEO; +//// r->codec = CODEC_ID_MPEG4; +// mp->initVideoPackagerRTP(r); +// mp->initVideoUnpackagerRTP(r); +// + ioservice_ = new boost::asio::io_service; + resolver_ = new udp::resolver(*ioservice_); + socket_ = new udp::socket(*ioservice_, udp::endpoint(udp::v4(), 40000)); + query_ = new udp::resolver::query(udp::v4(), "127.0.0.1", "50000"); + + boost::thread t = boost::thread(&Test::rec, this); + t.join(); + +} + +Test::~Test() { + //sock->disconnect(); +} + +void Test::receiveRawData(unsigned char*data, int len) { + printf("decoded data %d\n", len); + return; +} + +void Test::rec() { + +// int outBuff2Size = 706 * 396 * 3 / 2; +// + char* buff = (char*)malloc(2000); +// char * outBuff = (char*) malloc(50000); +// memset(outBuff, 0, 50000); +// char * outBuff2 = (char*) malloc(outBuff2Size); +// +// int gotFrame = 0; +// int size = 0; +// int gotDecFrame = 0; +// +// std::string s; +// unsigned short u; + int a; + while (true) { +// + memset(buff, 0, 2000); +// + a = socket_->receive(boost::asio::buffer(buff, 2000)); + ip->receiveVideoData(buff, a); +// printf("********* RECEPCIÓN *********\n"); +// printf("Bytes = %d\n", a); +// +// int z, b; +// z = mp->unpackageVideoRTP((char*) buff, a, outBuff, &gotFrame); +// +//// RTPPayloadVP8* parsed = pars.parseVP8((unsigned char*)outBuff, z); +//// b = parsed->dataLength; +//// int c = parsed.dataLength; +// +//// printf("Bytes desem = %d prevp8 %d\n", b, z ); +// +// size += z; +// outBuff += z; +// +// if (gotFrame) { +// +// outBuff -= size; +// +// printf("Tengo un frame desempaquetado!! 
Size = %d\n", size); +// +// int c; +// +// c = mp->decodeVideo(outBuff, size, outBuff2, outBuff2Size, +// &gotDecFrame); +// printf("Bytes dec = %d\n", c); +// +// size = 0; +// memset(outBuff, 0, 50000); +// gotFrame = 0; +// +// if (gotDecFrame) { +// printf("Tengo un frame decodificado!!"); +// gotDecFrame = 0; +// send(outBuff2, c); +// } + } +// } + +} + +void Test::send(char *buff, int buffSize) { + +// printf("\n********* ENVÍO *********"); +// +// char *buffSend = (char*) malloc(buffSize); +// char *buffSend2 = (char*) malloc(buffSize); +// +// int a = mp->encodeVideo(buff, buffSize, buffSend, buffSize); +// +// printf("\nBytes codificados = %d", a); +// +// int b = mp->packageVideoRTP(buffSend, a, buffSend2); +// +// printf("\nBytes empaquetados = %d", b); +// +//// udp::resolver::iterator iterator = resolver_->resolve(*query_); +// +//// socket_->send_to(buffSend2, b, "toronado.dit.upm.es", 5005); +//// socket_->send_to(boost::asio::buffer(buffSend2, b), *iterator); +// free(buffSend); +// free(buffSend2); +// +// printf("\n*************************"); +} + +} diff --git a/erizo/src/examples/Test.h b/erizo/src/examples/Test.h new file mode 100644 index 0000000000..b715a70e44 --- /dev/null +++ b/erizo/src/examples/Test.h @@ -0,0 +1,29 @@ +#include +#include +#include + +#ifndef TEST_H_ +#define TEST_H_ +namespace erizo{ +class Test: public RawDataReceiver { +public: + Test(); + virtual ~Test(); + void receiveRawData(unsigned char*data, int len); + + void rec(); + void send(char *buff, int buffSize); +private: + + boost::asio::ip::udp::socket* socket_; + boost::asio::ip::udp::resolver* resolver_; + + boost::asio::ip::udp::resolver::query* query_; + boost::asio::io_service* ioservice_; + InputProcessor* ip; + erizo::RtpParser pars; + +}; + +} +#endif /* TEST_H_ */ diff --git a/erizo/src/examples/hsam.cpp b/erizo/src/examples/hsam.cpp new file mode 100644 index 0000000000..fca1d457fa --- /dev/null +++ b/erizo/src/examples/hsam.cpp @@ -0,0 +1,83 @@ +/* + * hsam.cpp + */ + +#include + +#include +#include +#include +#include +#include "Test.h" +#include "pc/Observer.h" + +using namespace erizo; + +int publisherid = 0; +int main() { + + new Test(); + +// SDPReceiver* receiver = new SDPReceiver(); +// Observer *subscriber = new Observer("subscriber", receiver); +// new Observer("publisher", receiver); +// subscriber->wait(); +// return 0; +} + +SDPReceiver::SDPReceiver() { + muxer = new erizo::OneToManyProcessor(); +} + +bool SDPReceiver::createPublisher(int peer_id) { + if (muxer->publisher == NULL) { + printf("Adding publisher peer_id %d\n", peer_id); + WebRtcConnection *newConn = new WebRtcConnection; + newConn->init(); + newConn->setAudioReceiver(muxer); + newConn->setVideoReceiver(muxer); + muxer->setPublisher(newConn); + publisherid = peer_id; + } else { + printf("PUBLISHER ALREADY SET\n"); + return false; + } + return true; +} +bool SDPReceiver::createSubscriber(int peer_id) { + printf("Adding Subscriber peerid %d\n", peer_id); + if (muxer->subscribers.find(peer_id) != muxer->subscribers.end()) { + printf("OFFER AGAIN\n"); + return false; + } + + WebRtcConnection *newConn = new WebRtcConnection; + newConn->init(); + muxer->addSubscriber(newConn, peer_id); + return true; +} +void SDPReceiver::setRemoteSDP(int peer_id, const std::string &sdp) { + if (peer_id == publisherid) { + muxer->publisher->setRemoteSdp(sdp); + + } else { + muxer->subscribers[peer_id]->setRemoteSdp(sdp); + } +} +std::string SDPReceiver::getLocalSDP(int peer_id) { + std::string sdp; + if (peer_id == publisherid) 
{ + sdp = muxer->publisher->getLocalSdp(); + } else { + sdp = muxer->subscribers[peer_id]->getLocalSdp(); + } + printf("Getting localSDP %s\n", sdp.c_str()); + return sdp; +} +void SDPReceiver::peerDisconnected(int peer_id) { + if (peer_id != publisherid) { + printf("removing peer %d\n", peer_id); + muxer->removeSubscriber(peer_id); + } +} + diff --git a/erizo/src/examples/pc/Observer.cpp b/erizo/src/examples/pc/Observer.cpp new file mode 100644 index 0000000000..b2f81003ee --- /dev/null +++ b/erizo/src/examples/pc/Observer.cpp @@ -0,0 +1,111 @@ +/* + * Observer.cpp + */ + +#include +#include +#include "Observer.h" + +Observer::Observer(std::string name, SDPReceiver *receiver) : + pc_(new PC(name)), name_(name), receiver_(receiver) { + this->init(); +} + +Observer::~Observer() { +} + +void Observer::wait() { + m_Thread_.join(); +} + +void Observer::init() { + m_Thread_ = boost::thread(&Observer::start, this); +} + +void Observer::start() { + pc_->RegisterObserver(this); + + pc_->Connect(name_); + printf("Connected\n"); + while (true) { + pc_->OnHangingGetConnect(); + pc_->OnHangingGetRead(); + sleep(1); + } +} + +void Observer::processMessage(int peer_id, const std::string& message) { + printf("Processing Message %d, %s", peer_id, message.c_str()); + printf("OFFER1\n"); + std::string roap = message; + + // Pillar el OffererId + // Generar AnswererId + if (name_ == "publisher") { + if (!receiver_->createPublisher(peer_id)) + return; + } else { + if (!receiver_->createSubscriber(peer_id)) + return; + } + + std::string sdp = receiver_->getLocalSDP(peer_id); + std::string sdp2 = Observer::Match(roap, "^.*sdp\":\"(.*)\",.*$"); + Observer::Replace(sdp2, "\\\\r\\\\n", "\\n"); + printf("sdp OFFER!!!!!!!!!!!!\n%s\n", sdp2.c_str()); + receiver_->setRemoteSDP(peer_id, sdp2); + + Observer::Replace(sdp, "\n", "\\\\r\\\\n"); + std::string answererSessionId = "106"; +// std::string offererSessionId = Observer::Match(roap, "^.*offererSessionId\":(.{32,32}).*$"); + std::string offererSessionId = Observer::Match(roap, + "^.*offererSessionId\":(...).*$"); + std::string answer1("\n{\n \"messageType\":\"ANSWER\",\n"); + printf("sdp ANSWEEEER!!!!!!! 
%s\n", sdp.c_str()); + answer1.append(" \"sdp\":\"").append(sdp).append("\",\n"); + answer1.append(" \"offererSessionId\":").append(offererSessionId).append( + ",\n"); + answer1.append(" \"answererSessionId\":").append(answererSessionId).append( + ",\n"); + answer1.append(" \"seq\" : 1\n}\n"); + pc_->SendToPeer(peer_id, answer1); + +} + +void Observer::OnSignedIn() { +} +void Observer::OnDisconnected() { + pthread_exit(0); +} +void Observer::OnPeerConnected(int id, const std::string& name) { + +} +void Observer::OnPeerDisconnected(int peer_id) { + receiver_->peerDisconnected(peer_id); + +} +void Observer::OnMessageFromPeer(int peer_id, const std::string& message) { + printf("OnMessageFromPeer\n"); + printf("message : %s\n", message.c_str()); + std::string roap = message; + if (roap.find("OFFER") != std::string::npos) { + boost::thread theThread(&Observer::processMessage, this, peer_id, + message); + } +} +void Observer::OnMessageSent(int err) { + +} + +void Observer::Replace(std::string& text, const std::string& pattern, + const std::string& replace) { + boost::regex regex_pattern(pattern, boost::regex_constants::perl); + text = boost::regex_replace(text, regex_pattern, replace); +} +std::string Observer::Match(const std::string& text, + const std::string& pattern) { + boost::regex regex_pattern(pattern); + boost::cmatch what; + boost::regex_match(text.c_str(), what, regex_pattern); + return (std::string(what[1].first, what[1].second)); +} diff --git a/erizo/src/examples/pc/Observer.h b/erizo/src/examples/pc/Observer.h new file mode 100644 index 0000000000..bc13b814ec --- /dev/null +++ b/erizo/src/examples/pc/Observer.h @@ -0,0 +1,43 @@ +/* + * Observer.h + */ + +#ifndef OBSERVER_H_ +#define OBSERVER_H_ + +#include +#include + +#include "SDPReceiver.h" +#include "PCSocket.h" + +class Observer: PCClientObserver { +public: + Observer(std::string name, SDPReceiver *receiver); + ~Observer(); + void OnSignedIn(); // Called when we're logged on. 
+ void OnDisconnected(); + void OnPeerConnected(int id, const std::string& name); + void OnPeerDisconnected(int peer_id); + void OnMessageFromPeer(int peer_id, const std::string& message); + void OnMessageSent(int err); + void wait(); + + static void Replace(std::string& text, const std::string& pattern, + const std::string& replace); + static std::string Match(const std::string& text, + const std::string& pattern); + +private: + void init(); + void start(); + void processMessage(int peerid, const std::string& message); + + PC *pc_; + boost::thread m_Thread_; + std::string name_; + SDPReceiver *receiver_; +}; + +#endif /* OBSERVER_H_ */ + diff --git a/erizo/src/examples/pc/PCSocket.cpp b/erizo/src/examples/pc/PCSocket.cpp new file mode 100644 index 0000000000..3c0f2f74fe --- /dev/null +++ b/erizo/src/examples/pc/PCSocket.cpp @@ -0,0 +1,168 @@ +/* + * PCSocket.cpp + */ + +#include "PCSocket.h" +#include + + +#define PUBLISHER_PORT 8484 +#define SUBSCRIBER_PORT 8485 +std::string servAddress = "rocky.dit.upm.es"; //INSERT NODE SERVER ADDRESS HERE + +using boost::asio::ip::tcp; + +const char kByeMessage[] = "BYE"; + +PC::PC(const std::string &name) : + callback_(NULL), state_(NOT_CONNECTED), my_id_(-1), isSending(false) { + + ioservice_ = new boost::asio::io_service; + resolver_ = new tcp::resolver(*ioservice_); + + if (name.compare("publisher") == 0) { + control_socket_ = CreateClientSocket(PUBLISHER_PORT); + } else if (name.compare("subscriber") == 0) { + control_socket_ = CreateClientSocket(SUBSCRIBER_PORT); + } + +} + +PC::~PC() { +} + +int PC::id() const { + return my_id_; +} + +bool PC::is_connected() const { + return my_id_ != -1; +} + +const Peers& PC::peers() const { + return peers_; +} + +void PC::RegisterObserver(PCClientObserver* callback) { + callback_ = callback; +} + +tcp::socket* PC::CreateClientSocket(int port) { + // tcp::socket *sock = new tcp::socket (servAddress, port); + char portchar[6]; + sprintf(portchar, "%d", port); + tcp::resolver::query query(tcp::v4(), servAddress, portchar); + tcp::resolver::iterator iterator = resolver_->resolve(query); + tcp::socket *sock = new tcp::socket(*ioservice_); + boost::asio::connect(*sock, iterator); + return sock; +} + +bool PC::Connect(const std::string& client_name) { + std::string signin = "SIGN_IN;"; + signin.append(client_name).append(";"); + //control_socket_->send(boost::asio::buffer(signin, signin.length() ), signin.length()); + boost::asio::write(*control_socket_, + boost::asio::buffer((char*) signin.c_str(), signin.length())); + state_ = CONNECTED; + return true; +} + +bool PC::SendHangUp(int peer_id) { + return true; +} + +bool PC::IsSendingMessage() { + return (isSending); +} +bool PC::SignOut() { + return true; +} + +bool PC::ReadIntoBuffer(boost::asio::ip::tcp::socket* socket, std::string* data, + size_t* content_length) { + char charbuf[10000]; + + size_t reply_length = control_socket_->read_some( + boost::asio::buffer(charbuf, 10000)); + if (reply_length <= 0) + state_ = NOT_CONNECTED; + data->append(charbuf, reply_length); + *content_length = strlen(data->c_str()); + printf("RECEIVED %s \n", data->c_str()); + parseMessage(data); + return true; + +} + +void PC::OnRead(boost::asio::ip::tcp::socket* socket) { + size_t content_length = 0; + std::string the_data; + if (state_ == CONNECTED) { + if (ReadIntoBuffer(socket, &the_data, &content_length)) { + // printf("DATA\n%s\n", the_data.c_str()); + } + } +} +void PC::OnClose(boost::asio::ip::tcp::socket* socket, int err) { + +} + +void PC::Close() { + // 
control_socket_->Close(); + // hanging_get_->Close(); + +} + +void PC::OnHangingGetConnect() { + +} + +void PC::OnHangingGetRead() { + + OnRead(control_socket_); + +} + +void PC::parseMessage(std::string *data) { + size_t found1, found2; + found1 = data->find(';'); + if (found1 == std::string::npos) { + printf("Invalid Signalling Message\n"); + return; + } + std::string method = data->substr(0, found1); + found2 = data->find(';', found1 + 1); + if (found2 == std::string::npos) { + printf("Invalid Signalling Message\n"); + return; + } + std::string id = data->substr(found1 + 1, found2 - (found1 + 1)); + std::string message = data->substr(found2 + 1, data->length()); + int the_id = atoi(id.c_str()); + if (method.compare("MSG_FROM_PEER") == 0) + OnMessageFromPeer(the_id, message); + if (method.compare("BYE") == 0) { + callback_->OnPeerDisconnected(the_id); + } + +} + +void PC::OnMessageFromPeer(int peer_id, const std::string& message) { + callback_->OnMessageFromPeer(peer_id, message); + +} + +bool PC::SendToPeer(int peer_id, const std::string& message) { + printf("SENDING TO %d \n %s\n", peer_id, message.c_str()); + char peer[16]; + sprintf(peer, "%d", peer_id); + std::string msg; + msg.append("MSG_TO_PEER;").append(peer).append(";").append(message); + // control_socket_->send(msg.c_str(), msg.length()); + boost::asio::write(*control_socket_, + boost::asio::buffer((char*) msg.c_str(), msg.length())); + + return true; +} + diff --git a/erizo/src/examples/pc/PCSocket.h b/erizo/src/examples/pc/PCSocket.h new file mode 100644 index 0000000000..b8775f2cf1 --- /dev/null +++ b/erizo/src/examples/pc/PCSocket.h @@ -0,0 +1,82 @@ +#ifndef __PC_INCLUDED__ +#define __PC_INCLUDED__ + + +#include +#include + +typedef std::map<int, std::string> Peers; + +class PCClientObserver { +public: + virtual void OnSignedIn() = 0; // Called when we're logged on. + virtual void OnDisconnected() = 0; + virtual void OnPeerConnected(int id, const std::string& name) = 0; + virtual void OnPeerDisconnected(int peer_id) = 0; + virtual void OnMessageFromPeer(int peer_id, const std::string& message) = 0; + virtual void OnMessageSent(int err) = 0; + virtual ~PCClientObserver() {} +}; + +class PC { + public: + enum State { + NOT_CONNECTED, + SIGNING_IN, + CONNECTED, + SIGNING_OUT_WAITING, + SIGNING_OUT + }; + + PC(const std::string &name); + ~PC(); + + int id() const; + bool is_connected() const; + const Peers& peers() const; + + void RegisterObserver(PCClientObserver* callback); + + bool Connect(const std::string& client_name); + + bool SendToPeer(int peer_id, const std::string& message); + bool SendHangUp(int peer_id); + bool IsSendingMessage(); + + bool SignOut(); + void OnHangingGetRead(); + void OnHangingGetConnect(); + + protected: + void Close(); + bool ConnectControlSocket(); + boost::asio::ip::tcp::socket* CreateClientSocket(int port); + void OnMessageFromPeer(int peer_id, const std::string& message); + + // Returns true if the whole response has been read.
+ bool ReadIntoBuffer(boost::asio::ip::tcp::socket* socket, std::string* data, + size_t* content_length); + void OnRead(boost::asio::ip::tcp::socket* socket); + void parseMessage(std::string *data); + + + void OnClose(boost::asio::ip::tcp::socket* socket, int err); + + PCClientObserver* callback_; + std::string server_address_; + + boost::asio::ip::tcp::socket* control_socket_; + boost::asio::ip::tcp::resolver* resolver_; + boost::asio::io_service* ioservice_; + + std::string onconnect_data_; + std::string control_data_; + std::string notification_data_; + Peers peers_; + State state_; + int my_id_; + bool isSending; +}; + + +#endif diff --git a/erizo/src/examples/pc/SDPReceiver.h b/erizo/src/examples/pc/SDPReceiver.h new file mode 100644 index 0000000000..02fab7c24c --- /dev/null +++ b/erizo/src/examples/pc/SDPReceiver.h @@ -0,0 +1,18 @@ +#include + +class SDPReceiver { + +public: + + SDPReceiver(); + virtual ~SDPReceiver(){}; + bool createPublisher(int peer_id); + bool createSubscriber(int peer_id); + void setRemoteSDP(int peer_id, const std::string &sdp); + std::string getLocalSDP(int peer_id); + void peerDisconnected(int peer_id); + +private: + + erizo::OneToManyProcessor* muxer; +};
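
The usage sketches below are not part of the patch itself; they only illustrate, under stated assumptions, how the new pieces introduced above fit together. Names marked as hypothetical do not exist in the tree.

VideoUtils::vPutImage composites the three I420 planes separately: luma at offset 0, chroma U at totalW*totalH and chroma V at totalW*totalH*5/4, with each chroma plane at half resolution. A small standalone helper that mirrors that layout (struct and function names are invented for the example):

struct I420Planes {
  unsigned char* y;
  unsigned char* u;
  unsigned char* v;
  unsigned int strideY;
  unsigned int strideUV;
};

// Map a packed I420 buffer of size w*h*3/2 onto its three planes,
// using the same offsets vPutImage() uses when BPP == 1.
I420Planes mapI420(unsigned char* buffer, unsigned int w, unsigned int h) {
  I420Planes p;
  p.y = buffer;                 // w*h luma bytes
  p.u = buffer + w * h;         // (w/2)*(h/2) chroma U bytes
  p.v = buffer + w * h * 5 / 4; // (w/2)*(h/2) chroma V bytes
  p.strideY = w;
  p.strideUV = w / 2;
  return p;
}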
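The RTPHeader class is written directly against the wire layout, so a populated instance can be copied in front of a payload. This is only a sketch: it assumes the compiler packs the bit-fields to the RTP wire format (as the library itself relies on), and that <arpa/inet.h> and <stdint.h> are available for the ntohs/htonl calls used inside RtpHeader.h; the include path is also assumed.

#include <arpa/inet.h>   // ntohs/htonl used inside RtpHeader.h
#include <stdint.h>
#include <cstring>
#include "RtpHeader.h"   // assumed path; the class lives in the global namespace

// Build a minimal RTP packet: fixed 12-byte header followed by the payload.
// Returns the total number of bytes written into 'packet'.
int buildRtpPacket(const unsigned char* payload, int payloadLen,
                   unsigned char* packet, uint16_t seq, uint32_t ts,
                   uint32_t ssrc, uint8_t payloadType, uint8_t marker) {
  RTPHeader head;                 // constructor sets version = 2, rest zeroed
  head.setSeqNumber(seq);         // stored in network byte order internally
  head.setTimestamp(ts);
  head.setSSRC(ssrc);
  head.setPayloadType(payloadType);
  head.setMarker(marker);
  int headerLen = head.getHeaderLength();   // 12 while cc == 0 and no extension
  memcpy(packet, &head, headerLen);         // bit-fields mirror the wire format
  memcpy(packet + headerLen, payload, payloadLen);
  return headerLen + payloadLen;
}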
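RtpParser::parseVP8 walks the VP8 payload descriptor shown in the comment block of RtpParser.cpp and returns a heap-allocated RTPPayloadVP8 that the caller owns. A minimal sketch of inspecting one received payload (the include path and function name are assumptions for the example):

#include <cstdio>
#include "RtpParser.h"   // assumed include path (erizo/media/rtp)

// 'payload' points just past the RTP header of a received VP8 packet.
void inspectVp8Payload(unsigned char* payload, int payloadLen) {
  erizo::RtpParser parser;
  erizo::RTPPayloadVP8* vp8 = parser.parseVP8(payload, payloadLen);

  // vp8->data / vp8->dataLength point inside 'payload', just past the
  // payload descriptor. beginningOfPartition with partitionID == 0 marks
  // the start of a frame; the RTP marker bit marks its end.
  printf("partition %d, frame start %d, %u payload bytes\n",
         vp8->partitionID, (int) vp8->beginningOfPartition, vp8->dataLength);

  delete vp8;   // parseVP8 allocates the result with new; the caller owns it
}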
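RtpVP8Fragmenter splits one encoded VP8 frame into fragments of at most MAX_SIZE bytes and writes a one-byte payload descriptor in front of each (S bit set on the first fragment). A sketch of the send loop, assuming the frame buffer comes from the encoder and sendRtp() is a hypothetical output hook supplied by the caller:

#include "RtpVP8Fragmenter.h"   // assumed include path (erizo/media/rtp)

// Hypothetical output hook provided elsewhere by the caller.
void sendRtp(const unsigned char* payload, unsigned int len, bool marker);

void sendVp8Frame(unsigned char* encodedFrame, unsigned int frameLen) {
  if (frameLen == 0)
    return;                       // an empty queue would never set lastPacket
  erizo::RtpVP8Fragmenter frag(encodedFrame, frameLen, 1100);
  bool lastPacket = false;
  while (!lastPacket) {
    unsigned char payload[1200];  // >= MAX_SIZE plus the descriptor byte
    unsigned int payloadLen = 0;
    frag.getPacket(payload, &payloadLen, &lastPacket);
    // Each fragment starts with the descriptor written by writeFragment();
    // wrap it in an RTP header, setting the marker bit on the last fragment.
    sendRtp(payload, payloadLen, lastPacket);
  }
}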
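Observer.cpp pulls the SDP and the offererSessionId out of an incoming ROAP OFFER with boost::regex and builds the ANSWER by hand. The fragment below applies the same patterns to a toy, invented one-line ROAP string, purely to show what the Match/Replace helpers extract; it is not how the node.js server formats its messages.

#include <cstdio>
#include <string>
#include <boost/regex.hpp>

static std::string match(const std::string& text, const std::string& pattern) {
  boost::regex re(pattern);
  boost::cmatch what;
  if (!boost::regex_match(text.c_str(), what, re)) return "";
  return std::string(what[1].first, what[1].second);
}

int main() {
  // Invented one-line ROAP OFFER, escaped the way it arrives over the wire.
  std::string roap =
      "{\"messageType\":\"OFFER\",\"sdp\":\"v=0\\r\\no=- 0 0 IN IP4 0.0.0.0\\r\\n\","
      "\"offererSessionId\":105,\"seq\":1}";

  // Same patterns Observer::processMessage() uses.
  std::string sdp = match(roap, "^.*sdp\":\"(.*)\",.*$");
  boost::regex crlf("\\\\r\\\\n", boost::regex_constants::perl);
  sdp = boost::regex_replace(sdp, crlf, "\n");   // unescape the SDP line breaks

  std::string offererId = match(roap, "^.*offererSessionId\":(...).*$");
  printf("offererSessionId=%s\nsdp:\n%s\n", offererId.c_str(), sdp.c_str());
  return 0;
}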
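PCSocket speaks a very small line protocol to the signalling server: Connect() writes "SIGN_IN;<name>;", SendToPeer() writes "MSG_TO_PEER;<peer_id>;<message>", and parseMessage() expects incoming data of the form "<METHOD>;<peer_id>;<payload>" where METHOD is MSG_FROM_PEER or BYE. A standalone sketch of that framing, useful for exercising a server end without the library (helper names are invented):

#include <cstdio>
#include <cstdlib>
#include <string>

// Frame an outgoing message the way PC::SendToPeer() does.
std::string frameToPeer(int peerId, const std::string& message) {
  char id[16];
  snprintf(id, sizeof(id), "%d", peerId);
  return std::string("MSG_TO_PEER;") + id + ";" + message;
}

// Split an incoming "<METHOD>;<peer_id>;<payload>" line the way
// PC::parseMessage() does.
bool splitSignalling(const std::string& data, std::string* method,
                     int* peerId, std::string* payload) {
  size_t first = data.find(';');
  if (first == std::string::npos) return false;
  size_t second = data.find(';', first + 1);
  if (second == std::string::npos) return false;
  *method = data.substr(0, first);
  *peerId = atoi(data.substr(first + 1, second - first - 1).c_str());
  *payload = data.substr(second + 1);
  return true;
}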