diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ad87331 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +.idea +.vscode + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..ad410e1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index cbd5a0d..52784e4 100644 --- a/README.md +++ b/README.md @@ -1,93 +1,44 @@ -# goserver +## goserver +goserver 旨在做一个传统的CS结构的服务器框架 +目前主要用于游戏服务器开发 +框架还在不断努力完善中,如果你对它感兴趣,请关注它的动态或者参与进来 +## Features -## Getting started +* 组件通过package的概念统一管理(可以理解为win32下的dll),由config来配置各个组件的特性参数 +* goroutine通过Object进行包装以树型结构组织,Object间的通信通过command(内部是chan),主要是为了预防chan滥用、失控,从而造成各种死锁问题 +* 提供了时间,任务,事务,计划工作,网络通讯,模块管理的内置组件 +* 提供一套传统的游戏服务器架构(制作中...) -To make it easy for you to get started with GitLab, here's a list of recommended next steps. - -Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)! 
- -## Add your files - -- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files -- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command: - -``` -cd existing_repo -git remote add origin https://git.pogorockgames.com/mango-games/server/goserver.git -git branch -M main -git push -uf origin main -``` - -## Integrate with your tools - -- [ ] [Set up project integrations](https://git.pogorockgames.com/mango-games/server/goserver/-/settings/integrations) - -## Collaborate with your team - -- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/) -- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html) -- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) -- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) -- [ ] [Set auto-merge](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html) - -## Test and Deploy - -Use the built-in continuous integration in GitLab. 
- -- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html) -- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) -- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) -- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/) -- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html) - -*** - -# Editing this README - -When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thanks to [makeareadme.com](https://www.makeareadme.com/) for this template. - -## Suggestions for a good README - -Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information. - -## Name -Choose a self-explaining name for your project. - -## Description -Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors. - -## Badges -On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge. 
- -## Visuals -Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method. - -## Installation -Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection. - -## Usage -Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README. - -## Support -Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc. - -## Roadmap -If you have ideas for releases in the future, it is a good idea to list them in the README. - -## Contributing -State if you are open to contributions and what your requirements are for accepting them. - -For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self. - -You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. 
Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser. - -## Authors and acknowledgment -Show your appreciation to those who have contributed to the project. - -## License -For open source projects, say how it is licensed. - -## Project status -If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers. +## 模块说明 +* +core 核心模块 + * admin : http管理接口,主要提供一种外部可以操控进程的可能 + * basic : 基础的线程对象,封装对象间内部通讯;避免chan环锁现象,树形管理object + * bulletin: 框架内建元素,提供通讯层的一些基础过滤器和通讯协议 + * cmdline: 自建命令行,给控制台进程提供一种命令模式 + * container: 框架用到的一些容器,队列,回收器,线程安全list,线程安全map + * i18n: 国际化配置 + * logger: 日志接口 + * module: 业务模块管理,提供统一的心跳管理,模块通过注册挂载到管理器 + * mongo: mongodb相关配置 + * netlib: 通讯模块,支持TCP和WebSocket两种通讯方式 + * profile: 性能统计相关,用于辅助查找性能热点 + * schedule: 定时任务调度模块,用于周期job处理,如:每日凌晨4:00进行日志清理 + * signal: 信号管理模块,hook操作系统的信号进行回调处理,如:kill -2 PID + * task: 线程模块,提供线程池、实名线程和独立线程多种模式 + * timer: 定时器,有别于go内置的timer;主要用于确保线程安全问题 + * transact: 分布式事务,基于二段提交实现,协调多节点配合完成一件原子性操作 + * utils: 工具接口 + * zk: zookeeper接口,用于分布式协调 +* +srvlib core/netlib的扩展封装,提供常用的客户端session和服务端service管理,以及服务发现;进一步封装,使框架层达到开箱即用 + * action 内置常用的包重定向和中转操作 + * handler 提供基本的session和service管理 + * protocol 内置协议定义 +* +examples 示例程序 + * echoclient 回声客户端程序 + * echoserver 回声服务端程序 + * other timer和task使用示例 + * txserver1 分布式事务节点1 + * txserver2 分布式事务节点2 +* +mmo 提供一套基本的服务器架构模板 \ No newline at end of file diff --git a/bin/protoc-3.5.1-win32/bin/protoc.exe b/bin/protoc-3.5.1-win32/bin/protoc.exe new file mode 100644 index 0000000..16bc9c3 Binary files /dev/null and b/bin/protoc-3.5.1-win32/bin/protoc.exe differ diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/any.proto
b/bin/protoc-3.5.1-win32/include/google/protobuf/any.proto new file mode 100644 index 0000000..c748667 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/any.proto @@ -0,0 +1,149 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type.
+ bytes value = 2; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/api.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/api.proto new file mode 100644 index 0000000..f37ee2f --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/api.proto @@ -0,0 +1,210 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/source_context.proto"; +import "google/protobuf/type.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "ApiProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/genproto/protobuf/api;api"; + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +message Api { + + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + string name = 1; + + // The methods of this interface, in unspecified order. + repeated Method methods = 2; + + // Any metadata attached to the interface. + repeated Option options = 3; + + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. 
Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v<major-version>`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + string version = 4; + + // Source context for the protocol buffer service represented by this + // message. + SourceContext source_context = 5; + + // Included interfaces. See [Mixin][]. + repeated Mixin mixins = 6; + + // The source syntax of the service. + Syntax syntax = 7; +} + +// Method represents a method of an API interface. +message Method { + + // The simple name of this method. + string name = 1; + + // A URL of the input message type. + string request_type_url = 2; + + // If true, the request is streamed. + bool request_streaming = 3; + + // The URL of the output message type. + string response_type_url = 4; + + // If true, the response is streamed. + bool response_streaming = 5; + + // Any metadata attached to the method. + repeated Option options = 6; + + // The source syntax of this method. + Syntax syntax = 7; +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows.
Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +message Mixin { + // The fully qualified name of the interface which is included. 
+ string name = 1; + + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + string root = 2; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/compiler/plugin.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/compiler/plugin.proto new file mode 100644 index 0000000..5b55745 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/compiler/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. 
The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. 
The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. 
Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/descriptor.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/descriptor.proto new file mode 100644 index 0000000..8697a50 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/descriptor.proto @@ -0,0 +1,872 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). 
+ + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. 
+message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. 
However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. 
Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. 
See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. 
+ optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. 
+ optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. 
+ // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. 
+ optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. 
+ JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. 
+ enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing an option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. 
+message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. 
Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. 
+ // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. 
+message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/duration.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/duration.proto new file mode 100644 index 0000000..975fce4 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. 
Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/empty.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/empty.proto new file mode 100644 index 0000000..03cacd2 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/field_mask.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/field_mask.proto new file mode 100644 index 0000000..eb96ba0 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/field_mask.proto @@ -0,0 +1,252 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/genproto/protobuf/field_mask;field_mask"; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. 
In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. 
+// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. 
Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of all the API methods, which have any FieldMask type +// field in the request, should verify the included field paths, and return +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +message FieldMask { + // The set of field mask paths. + repeated string paths = 1; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/source_context.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/source_context.proto new file mode 100644 index 0000000..f3b2c96 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/source_context.proto @@ -0,0 +1,48 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "SourceContextProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/genproto/protobuf/source_context;source_context"; + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +message SourceContext { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. + string file_name = 1; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/struct.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/struct.proto new file mode 100644 index 0000000..7d7808e --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. 
+// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/timestamp.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/timestamp.proto new file mode 100644 index 0000000..06750ab --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
+// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. 
+// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/type.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/type.proto new file mode 100644 index 0000000..624c15e --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/type.proto @@ -0,0 +1,187 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/any.proto"; +import "google/protobuf/source_context.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TypeProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/genproto/protobuf/ptype;ptype"; + +// A protocol buffer message type. +message Type { + // The fully qualified message name. + string name = 1; + // The list of fields. + repeated Field fields = 2; + // The list of types appearing in `oneof` definitions in this type. + repeated string oneofs = 3; + // The protocol buffer options. 
+ repeated Option options = 4; + // The source context. + SourceContext source_context = 5; + // The source syntax. + Syntax syntax = 6; +} + +// A single field of a message type. +message Field { + // Basic field types. + enum Kind { + // Field type unknown. + TYPE_UNKNOWN = 0; + // Field type double. + TYPE_DOUBLE = 1; + // Field type float. + TYPE_FLOAT = 2; + // Field type int64. + TYPE_INT64 = 3; + // Field type uint64. + TYPE_UINT64 = 4; + // Field type int32. + TYPE_INT32 = 5; + // Field type fixed64. + TYPE_FIXED64 = 6; + // Field type fixed32. + TYPE_FIXED32 = 7; + // Field type bool. + TYPE_BOOL = 8; + // Field type string. + TYPE_STRING = 9; + // Field type group. Proto2 syntax only, and deprecated. + TYPE_GROUP = 10; + // Field type message. + TYPE_MESSAGE = 11; + // Field type bytes. + TYPE_BYTES = 12; + // Field type uint32. + TYPE_UINT32 = 13; + // Field type enum. + TYPE_ENUM = 14; + // Field type sfixed32. + TYPE_SFIXED32 = 15; + // Field type sfixed64. + TYPE_SFIXED64 = 16; + // Field type sint32. + TYPE_SINT32 = 17; + // Field type sint64. + TYPE_SINT64 = 18; + }; + + // Whether a field is optional, required, or repeated. + enum Cardinality { + // For fields with unknown cardinality. + CARDINALITY_UNKNOWN = 0; + // For optional fields. + CARDINALITY_OPTIONAL = 1; + // For required fields. Proto2 syntax only. + CARDINALITY_REQUIRED = 2; + // For repeated fields. + CARDINALITY_REPEATED = 3; + }; + + // The field type. + Kind kind = 1; + // The field cardinality. + Cardinality cardinality = 2; + // The field number. + int32 number = 3; + // The field name. + string name = 4; + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + string type_url = 6; + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. 
+ int32 oneof_index = 7; + // Whether to use alternative packed wire representation. + bool packed = 8; + // The protocol buffer options. + repeated Option options = 9; + // The field JSON name. + string json_name = 10; + // The string value of the default value of this field. Proto2 syntax only. + string default_value = 11; +} + +// Enum type definition. +message Enum { + // Enum type name. + string name = 1; + // Enum value definitions. + repeated EnumValue enumvalue = 2; + // Protocol buffer options. + repeated Option options = 3; + // The source context. + SourceContext source_context = 4; + // The source syntax. + Syntax syntax = 5; +} + +// Enum value definition. +message EnumValue { + // Enum value name. + string name = 1; + // Enum value number. + int32 number = 2; + // Protocol buffer options. + repeated Option options = 3; +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +message Option { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + string name = 1; + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Any value = 2; +} + +// The syntax in which a protocol buffer element is defined. +enum Syntax { + // Syntax `proto2`. + SYNTAX_PROTO2 = 0; + // Syntax `proto3`. 
+ SYNTAX_PROTO3 = 1; +} diff --git a/bin/protoc-3.5.1-win32/include/google/protobuf/wrappers.proto b/bin/protoc-3.5.1-win32/include/google/protobuf/wrappers.proto new file mode 100644 index 0000000..0194763 --- /dev/null +++ b/bin/protoc-3.5.1-win32/include/google/protobuf/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. 
+ string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/bin/protoc-3.5.1-win32/readme.txt b/bin/protoc-3.5.1-win32/readme.txt new file mode 100644 index 0000000..e1a718d --- /dev/null +++ b/bin/protoc-3.5.1-win32/readme.txt @@ -0,0 +1,15 @@ +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. +https://developers.google.com/protocol-buffers/ + +This package contains a precompiled binary version of the protocol buffer +compiler (protoc). This binary is intended for users who want to use Protocol +Buffers in languages other than C++ but do not want to compile protoc +themselves. To install, simply place this binary somewhere in your PATH. + +If you intend to use the included well known types then don't forget to +copy the contents of the 'include' directory somewhere as well, for example +into '/usr/local/include/'. + +Please refer to our official github site for more installation instructions: + https://github.com/google/protobuf diff --git a/bin/protoc-gen-go.exe b/bin/protoc-gen-go.exe new file mode 100644 index 0000000..2a8619b Binary files /dev/null and b/bin/protoc-gen-go.exe differ diff --git a/core/admin/admin.go b/core/admin/admin.go new file mode 100644 index 0000000..066195c --- /dev/null +++ b/core/admin/admin.go @@ -0,0 +1,154 @@ +package admin + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/schedule" + "mongo.games.com/goserver/core/utils" +) + +// MyAdminApp is the default AdminApp used by admin module. +var MyAdminApp *AdminApp + +// AdminIndex is the default http.Handler for admin module. +// it matches url pattern "/". 
+func AdminIndex(rw http.ResponseWriter, r *http.Request) { + rw.Write([]byte("Welcome to Admin Dashboard\n")) + rw.Write([]byte("There are several functions:\n")) + rw.Write([]byte(fmt.Sprintf("1. Get runtime profiling data by the pprof, http://%s:%d/prof\n", Config.AdminHttpAddr, Config.AdminHttpPort))) + rw.Write([]byte(fmt.Sprintf("2. Get healthcheck result from http://%s:%d/healthcheck\n", Config.AdminHttpAddr, Config.AdminHttpPort))) + rw.Write([]byte(fmt.Sprintf("3. Get current task information from task http://%s:%d/task \n", Config.AdminHttpAddr, Config.AdminHttpPort))) + rw.Write([]byte(fmt.Sprintf("4. To run a task passed a param http://%s:%d/runtask\n", Config.AdminHttpAddr, Config.AdminHttpPort))) + rw.Write([]byte(fmt.Sprintf("5. Get all config & router information http://%s:%d/listconf\n", Config.AdminHttpAddr, Config.AdminHttpPort))) + +} + +// ListConf is the http.Handler of displaying all configuration values as key/value pairs. +// it's registered with url pattern "/listconf" in admin module. +func ListConf(rw http.ResponseWriter, r *http.Request) { + rw.Write([]byte("unimplemented")) +} + +// ProfIndex is a http.Handler for showing profile command. +// it's in url pattern "/prof" in admin module. +func ProfIndex(rw http.ResponseWriter, r *http.Request) { + r.ParseForm() + command := r.Form.Get("command") + if command != "" { + utils.ProcessInput(command, rw) + } else { + rw.Write([]byte("request url like '/prof?command=lookup goroutine'\n")) + rw.Write([]byte("the command has the following types:\n")) + rw.Write([]byte("1. lookup goroutine\n")) + rw.Write([]byte("2. lookup heap\n")) + rw.Write([]byte("3. lookup threadcreate\n")) + rw.Write([]byte("4. lookup block\n")) + rw.Write([]byte("5. start cpuprof\n")) + rw.Write([]byte("6. stop cpuprof\n")) + rw.Write([]byte("7. get memprof\n")) + rw.Write([]byte("8. gc summary\n")) + rw.Write([]byte("9. 
logic statistics\n")) + } +} + +// Healthcheck is a http.Handler calling health checking and showing the result. +// it's in "/healthcheck" pattern in admin module. +func Healthcheck(rw http.ResponseWriter, req *http.Request) { + defer utils.DumpStackIfPanic("Admin Healthcheck") + for name, h := range utils.AdminCheckList { + if err := h.Check(); err == nil { + fmt.Fprintf(rw, "%s : ok\n", name) + } else { + fmt.Fprintf(rw, "%s : %s\n", name, err.Error()) + } + } +} + +// TaskStatus is a http.Handler with running task status (task name, status and the last execution). +// it's in "/task" pattern in admin module. +func TaskStatus(rw http.ResponseWriter, req *http.Request) { + tasks := schedule.GetAllTask() + for tname, tk := range tasks { + fmt.Fprintf(rw, "%s:%s:%s", tname, tk.GetStatus(), tk.GetPrev().String()) + } +} + +type TaskRunResult struct { + Code int + Err string +} + +// RunTask is a http.Handler to run a Task from the "query string. +// the request url likes /runtask?taskname=sendmail. +func RunTask(rw http.ResponseWriter, req *http.Request) { + defer req.ParseForm() + taskname := req.Form.Get("taskname") + trr := &TaskRunResult{} + t := schedule.GetTask(taskname) + if t != nil { + err := t.Run() + if err != nil { + trr.Code = 1 + trr.Err = err.Error() + } else { + trr.Code = 0 + } + } else { + trr.Err = fmt.Sprintf("there's no task which named:%s", taskname) + trr.Code = 2 + } + b, _ := json.Marshal(trr) + fmt.Println(string(b[:])) + rw.Write(b) +} + +// AdminApp is an http.HandlerFunc map used as AdminApp. +type AdminApp struct { + routers map[string]http.HandlerFunc +} + +// Route adds http.HandlerFunc to AdminApp with url pattern. +func (admin *AdminApp) Route(pattern string, f http.HandlerFunc) { + admin.routers[pattern] = f +} + +// Start AdminApp http server. +// Its addr is defined in configuration file as adminhttpaddr and adminhttpport. 
+func (admin *AdminApp) Start(AdminHttpAddr string, AdminHttpPort int) { + for p, f := range admin.routers { + http.Handle(p, f) + } + + addr := fmt.Sprintf("%s:%d", AdminHttpAddr, AdminHttpPort) + l, err := net.Listen("tcp", addr) + if err != nil { + logger.Logger.Critical("Admin Listen error: ", err) + return + } + + logger.Logger.Infof("Admin Serve: %s", l.Addr()) + + go func() { + server := &http.Server{} + err = server.Serve(l) + if err != nil { + logger.Logger.Critical("Admin Serve: ", err) + } + }() +} + +func init() { + MyAdminApp = &AdminApp{ + routers: make(map[string]http.HandlerFunc), + } + //MyAdminApp.Route("/", AdminIndex) + //MyAdminApp.Route("/prof", ProfIndex) + //MyAdminApp.Route("/healthcheck", Healthcheck) + //MyAdminApp.Route("/task", TaskStatus) + //MyAdminApp.Route("/runtask", RunTask) + //MyAdminApp.Route("/listconf", ListConf) +} diff --git a/core/admin/config.go b/core/admin/config.go new file mode 100644 index 0000000..a25c7eb --- /dev/null +++ b/core/admin/config.go @@ -0,0 +1,33 @@ +package admin + +import ( + "mongo.games.com/goserver/core" +) + +var Config = Configuration{} + +type Configuration struct { + SupportAdmin bool + AdminHttpAddr string + AdminHttpPort int + WhiteHttpAddr []string +} + +func (c *Configuration) Name() string { + return "admin" +} + +func (c *Configuration) Init() error { + if c.SupportAdmin { + MyAdminApp.Start(c.AdminHttpAddr, c.AdminHttpPort) + } + return nil +} + +func (c *Configuration) Close() error { + return nil +} + +func init() { + core.RegistePackage(&Config) +} diff --git a/core/basic/command.go b/core/basic/command.go new file mode 100644 index 0000000..e4b86e0 --- /dev/null +++ b/core/basic/command.go @@ -0,0 +1,13 @@ +package basic + +// Object to process the command. 
+ +type Command interface { + Done(*Object) error +} + +type CommandWrapper func(*Object) error + +func (cw CommandWrapper) Done(o *Object) error { + return cw(o) +} diff --git a/core/basic/command_own.go b/core/basic/command_own.go new file mode 100644 index 0000000..fbaaf96 --- /dev/null +++ b/core/basic/command_own.go @@ -0,0 +1,32 @@ +package basic + +import "mongo.games.com/goserver/core/container" + +type ownCommand struct { + c *Object +} + +func (oc *ownCommand) Done(o *Object) error { + + defer o.ProcessSeqnum() + + // If the object is already being shut down, new owned objects are + // immediately asked to terminate. Note that linger is set to zero. + if o.terminating { + o.termAcks++ + SendTerm(oc.c) + return nil + } + + // Store the reference to the owned object. + if o.childs == nil { + o.childs = container.NewSynchronizedMap() + } + o.childs.Set(oc.c.Id, oc.c) + + return nil +} + +func SendOwn(p *Object, c *Object) bool { + return p.SendCommand(&ownCommand{c: c}, true) +} diff --git a/core/basic/command_term.go b/core/basic/command_term.go new file mode 100644 index 0000000..96b72d4 --- /dev/null +++ b/core/basic/command_term.go @@ -0,0 +1,21 @@ +package basic + +var termCmd = &termCommand{} + +type termCommand struct { +} + +func (tc *termCommand) Done(o *Object) error { + if o == nil { + return nil + } + + // Double termination should never happen. + o.processTerm() + + return nil +} + +func SendTerm(o *Object) bool { + return o.SendCommand(termCmd, false) +} diff --git a/core/basic/command_termack.go b/core/basic/command_termack.go new file mode 100644 index 0000000..47aff05 --- /dev/null +++ b/core/basic/command_termack.go @@ -0,0 +1,25 @@ +package basic + +var termAckCmd = &termAckCommand{} + +type termAckCommand struct { +} + +func (tac *termAckCommand) Done(o *Object) error { + if o == nil { + return nil + } + + if o.termAcks > 0 { + o.termAcks-- + + // This may be a last ack we are waiting for before termination... 
+ o.checkTermAcks() + } + + return nil +} + +func SendTermAck(p *Object) bool { + return p.SendCommand(termAckCmd, false) +} diff --git a/core/basic/command_termreq.go b/core/basic/command_termreq.go new file mode 100644 index 0000000..65a3e96 --- /dev/null +++ b/core/basic/command_termreq.go @@ -0,0 +1,33 @@ +package basic + +type termReqCommand struct { + c *Object +} + +func (trc *termReqCommand) Done(o *Object) error { + if o == nil { + return nil + } + + // When shutting down we can ignore termination requests from owned + // objects. The termination request was already sent to the object. + if o.terminating { + return nil + } + + // If I/O object is well and alive let's ask it to terminate. + if o.childs.IsExist(trc.c.Id) { + o.termAcks++ + // Note that this object is the root of the (partial shutdown) thus, its + // value of linger is used, rather than the value stored by the children. + SendTerm(trc.c) + // Remove child + o.childs.Delete(trc.c.Id) + } + + return nil +} + +func SendTermReq(p *Object, c *Object) bool { + return p.SendCommand(&termReqCommand{c: c}, false) +} diff --git a/core/basic/cond.go b/core/basic/cond.go new file mode 100644 index 0000000..60bd563 --- /dev/null +++ b/core/basic/cond.go @@ -0,0 +1,72 @@ +package basic + +import ( + "sync/atomic" + "time" +) + +type Cond struct { + notify chan struct{} + countor int32 +} + +func NewCond(waitor int) *Cond { + return &Cond{notify: make(chan struct{}, waitor)} +} + +func (c *Cond) Wait() { + atomic.AddInt32(&c.countor, 1) + defer atomic.AddInt32(&c.countor, -1) + + select { + case <-c.notify: + } +} + +func (c *Cond) WaitForTimeout(dura time.Duration) bool { + atomic.AddInt32(&c.countor, 1) + defer atomic.AddInt32(&c.countor, -1) + + select { + case <-c.notify: + case <-time.Tick(dura): + return true + } + return false +} + +func (c *Cond) WaitForTick(ticker *time.Ticker) bool { + atomic.AddInt32(&c.countor, 1) + defer atomic.AddInt32(&c.countor, -1) + + select { + case <-c.notify: + case 
<-ticker.C: + return true + } + return false +} + +func (c *Cond) Signal() { + select { + case c.notify <- struct{}{}: + default: + return + } +} + +func (c *Cond) Drain() { + for { + select { + case <-c.notify: + default: + return + } + } +} + +func (c *Cond) Broadcast() { + for atomic.LoadInt32(&c.countor) > 0 { + c.notify <- struct{}{} + } +} diff --git a/core/basic/object.go b/core/basic/object.go new file mode 100644 index 0000000..42f1a6d --- /dev/null +++ b/core/basic/object.go @@ -0,0 +1,432 @@ +package basic + +import ( + "container/list" + "sync" + "sync/atomic" + "time" + + "fmt" + "mongo.games.com/goserver/core/container" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" +) + +const ( + DefaultQueueBacklog int = 4 +) + +var ( +// Waitor = utils.NewWaitor() +) + +// Base class for need alone goroutine objects +// that easy to start and when to exit the unified management +// Feature. +// establish a tree structure between objects +// asynchronous message queue +type Object struct { + *utils.Waitor + sync.RWMutex + // Identify + Id int + + // Name + Name string + + // True if termination was already initiated. If so, we can destroy + // the object if there are no more child objects or pending term acks. + terminating bool + + // True if termination was already finished. + terminated bool + + // Sequence number of the last command sent to this object. + sentSeqnum uint32 + + // Sequence number of the last command processed by this object. + processedSeqnum uint32 + + // Number of events we have to get before we can destroy the object. + termAcks int + + // List of all objects owned by this object. We are responsible + // for deallocating them before we quit. + childs *container.SynchronizedMap + + // Socket owning this object. It's responsible for shutting down + // this object. + owner *Object + + // Command queue + que *list.List + + // Configuration Options + opt Options + + // Currently resides goroutine id. 
I do not know how get it. + gid int + + // + waitActive chan struct{} + // + waitEnlarge chan struct{} + + // UserData + UserData interface{} + // + sinker Sinker + // + timer *time.Ticker + //object local storage + ols [OLS_MAX_SLOT]interface{} + // + recvCmdCnt int64 + // + sendCmdCnt int64 + // + cond *Cond +} + +func NewObject(id int, name string, opt Options, sinker Sinker) *Object { + o := &Object{ + Id: id, + Name: name, + opt: opt, + sinker: sinker, + waitActive: make(chan struct{}, 1), + waitEnlarge: make(chan struct{}, 1), + childs: container.NewSynchronizedMap(), + cond: NewCond(1), + } + + o.init() + go func() { + defer func() { + if err := recover(); err != nil { + logger.Logger.Error(o, "panic, o.ProcessCommand error=", err) + } + }() + o.ProcessCommand() + }() + + return o +} + +func (o *Object) GetTreeName() string { + name := o.Name + parent := o.owner + for parent != nil { + name = parent.Name + "/" + name + parent = parent.owner + } + return "/" + name +} + +func (o *Object) init() { + o.que = list.New() +} + +// Active inner goroutine +func (o *Object) Active() { + o.waitActive <- struct{}{} +} + +// Launch the supplied object and become its owner. +func (o *Object) LaunchChild(c *Object) { + if c == nil { + return + } + + if c.owner != nil { + panic("An object can have only one parent node") + } + + c.owner = o + c.Waitor = o.Waitor + c.Active() + c.safeStart() + + // Take ownership of the object. + SendOwn(o, c) +} + +// thread safe +func (o *Object) GetChildById(id int) *Object { + c := o.childs.Get(id) + if cc, ok := c.(*Object); ok { + return cc + } + return nil +} + +// When another owned object wants to send command to this object +// it calls this function to let it know it should not shut down +// before the command is delivered. +func (o *Object) incSeqnum() { + atomic.AddUint32(&(o.sentSeqnum), 1) +} + +// Special handler called after a command that requires a seqnum +// was processed. 
The implementation should catch up with its counter
+// of processed commands here.
+func (o *Object) ProcessSeqnum() {
+	// Catch up with counter of processed commands.
+	o.processedSeqnum++
+
+	// We may have caught up and still have pending term acks.
+	o.checkTermAcks()
+}
+
+// Check whether all the pending term acks were delivered.
+// If so, deallocate this object.
+func (o *Object) checkTermAcks() {
+	name := o.GetTreeName()
+	logger.Logger.Debugf("(%v) object checkTermAcks terminating=%v processedSeqnum=%v sentSeqnum=%v termAcks=%v ", name, o.terminating, o.processedSeqnum, o.sentSeqnum, o.termAcks)
+	// Destruction is allowed only once termination was requested, every
+	// command sent to us has been processed, and no child acks are pending.
+	if o.terminating && o.processedSeqnum == o.sentSeqnum && o.termAcks == 0 {
+
+		// Sanity check. There should be no active children at this point.
+
+		// The root object has nobody to confirm the termination to.
+		// Other nodes will confirm the termination to the owner.
+		if o.owner != nil {
+			logger.Logger.Debugf("(%v)->(%v) Object SendTermAck ", o.Name, o.owner.Name)
+			SendTermAck(o.owner)
+		}
+
+		// Deallocate the resources.
+		o.processDestroy()
+	}
+}
+
+// Ask owner object to terminate this object. It may take a while
+// while actual termination is started. This function should not be
+// called more than once.
+func (o *Object) Terminate(s *Object) {
+	// If termination is already underway, there's no point
+	// in starting it anew.
+	if o.terminating {
+		return
+	}
+
+	name := o.GetTreeName()
+	logger.Logger.Debugf("(%v) object Terminate ", name)
+	// As for the root of the ownership tree, there's no one to terminate it,
+	// so it has to terminate itself.
+	if o.owner == nil {
+		o.processTerm()
+		return
+	}
+
+	// If I am an owned object, I'll ask my owner to terminate me.
+	SendTermReq(o.owner, o)
+}
+
+// Term handler is protected rather than private so that it can
+// be intercepted by the derived class. This is useful to add custom
+// steps to the beginning of the termination process.
+func (o *Object) processTerm() {
+	// Double termination should never happen.
+	if o.terminating {
+		return
+	}
+
+	// Send termination request to all owned objects.
+	cnt := 0
+	childs := o.childs.Items()
+	for _, c := range childs {
+		if cc, ok := c.(*Object); ok && cc != nil {
+			SendTerm(cc)
+			cnt++
+		}
+	}
+	// One ack is expected from every child that was asked to terminate.
+	o.termAcks += cnt
+
+	name := o.GetTreeName()
+	logger.Logger.Debugf("(%v) object processTerm, termAcks=%v", name, o.termAcks)
+
+	o.safeStop()
+	// Start termination process and check whether by chance we cannot
+	// terminate immediately.
+	o.terminating = true
+	o.checkTermAcks()
+}
+
+// A place to hook in when physical destruction of the object
+// is to be delayed.
+func (o *Object) processDestroy() {
+	name := o.GetTreeName()
+	logger.Logger.Debugf("(%v) object processDestroy ", name)
+	o.terminated = true
+	//clear ols
+	o.OlsClrValue()
+}
+
+// GetPendingCommandCnt returns the number of commands currently queued.
+// Thread safe (guarded by the object's RWMutex).
+func (o *Object) GetPendingCommandCnt() int {
+	o.RLock()
+	cnt := o.que.Len()
+	o.RUnlock()
+	return cnt
+}
+
+// SendCommand enqueues command c for this object's worker goroutine and
+// wakes it via the condition variable. When incseq is true the sent-command
+// sequence number is bumped, which delays destruction until the command is
+// processed (see checkTermAcks). Always returns true.
+func (o *Object) SendCommand(c Command, incseq bool) bool {
+	if incseq {
+		o.incSeqnum()
+	}
+
+	o.Lock()
+	o.que.PushBack(c)
+	o.Unlock()
+
+	atomic.AddInt64(&o.sendCmdCnt, 1)
+
+	//notify
+	o.cond.Signal()
+	return true
+}
+
+// Dequeue command and process it.
+func (o *Object) ProcessCommand() { + + //wait for active + <-o.waitActive + + //deamon or no + if o.Waitor != nil { + o.Waitor.Add(o.Name, 1) + defer o.Waitor.Done(o.Name) + } + + var tickMode bool + if o.opt.Interval > 0 && o.sinker != nil && o.timer == nil { + o.timer = time.NewTicker(o.opt.Interval) + defer o.timer.Stop() + tickMode = true + } + + name := o.GetTreeName() + logger.Logger.Debug("(", name, ") object active!!!") + doneCnt := 0 + for !o.terminated { + cnt := o.GetPendingCommandCnt() + if cnt == 0 { + if tickMode { + if o.cond.WaitForTick(o.timer) { + //logger.Logger.Debug("(", name, ") object safeTick 1 ", time.Now()) + o.safeTick() + doneCnt = 0 + continue + } + } else { + o.cond.Wait() + } + } + + o.Lock() + e := o.que.Front() + if e != nil { + o.que.Remove(e) + } + o.Unlock() + + if e != nil { + if cmd, ok := e.Value.(Command); ok { + o.safeDone(cmd) + doneCnt++ + } + } + + if tickMode { + select { + case <-o.timer.C: + //logger.Logger.Debug("(", name, ") object safeTick 2 ", time.Now()) + o.safeTick() + doneCnt = 0 + default: + } + // 在一个心跳周期内待处理任务过多 + // cnt 剩余任务数量(待处理任务) + // MaxDone 允许最大待处理任务数量 + // doneCnt 当前心跳周期内已经处理的任务数量 + if doneCnt > o.opt.MaxDone || cnt > o.opt.MaxDone { + logger.Logger.Warn("(", name, ") object queue cmd count(", cnt, ") maxdone(", o.opt.MaxDone, ")", " this tick process cnt(", doneCnt, ")") + } + } + } + + cnt := o.GetPendingCommandCnt() + logger.Logger.Debug("(", name, ") object ProcessCommand done!!! 
queue rest cmd count(", cnt, ") ") +} + +func (o *Object) safeDone(cmd Command) { + defer utils.DumpStackIfPanic("Object::Command::Done") + if StatsWatchMgr != nil { + watch := StatsWatchMgr.WatchStart(fmt.Sprintf("/object/%v/cmdone", o.Name), 4) + if watch != nil { + defer watch.Stop() + } + } + + err := cmd.Done(o) + atomic.AddInt64(&o.recvCmdCnt, 1) + if err != nil { + panic(err) + } +} + +func (o *Object) safeStart() { + defer utils.DumpStackIfPanic("Object::OnStart") + + if o.sinker != nil { + o.sinker.OnStart() + } +} + +func (o *Object) safeTick() { + defer utils.DumpStackIfPanic("Object::OnTick") + + if o.sinker != nil { + o.sinker.OnTick() + } +} + +func (o *Object) safeStop() { + defer utils.DumpStackIfPanic("Object::OnStop") + + if o.sinker != nil { + o.sinker.OnStop() + } +} + +func (o *Object) IsTermiated() bool { + return o.terminated +} + +func (o *Object) StatsSelf() (stats CmdStats) { + stats.PendingCnt = int64(o.GetPendingCommandCnt()) + stats.SendCmdCnt = atomic.LoadInt64(&o.sendCmdCnt) + stats.RecvCmdCnt = atomic.LoadInt64(&o.recvCmdCnt) + return +} + +func (o *Object) GetStats() map[string]CmdStats { + if o.childs == nil { + return nil + } + stats := make(map[string]CmdStats) + stats[o.GetTreeName()] = o.StatsSelf() + childs := o.childs.Items() + for _, c := range childs { + if cc, ok := c.(*Object); ok && cc != nil { + stats[cc.GetTreeName()] = cc.StatsSelf() + subStats := cc.GetStats() + if subStats != nil && len(subStats) > 0 { + for k, v := range subStats { + stats[k] = v + } + } + } + } + return stats +} diff --git a/core/basic/object_test.go b/core/basic/object_test.go new file mode 100644 index 0000000..3eae564 --- /dev/null +++ b/core/basic/object_test.go @@ -0,0 +1,92 @@ +package basic + +import ( + "fmt" + "runtime" + "testing" + "time" +) + +func TestSendCommand(t *testing.T) { + n := 5 + opt := Options{ + Interval: time.Second, + MaxDone: n, + } + c := make(chan int) + o := NewObject(1, "test1", opt, nil) + o.Active() + for i := 0; 
i < n*2; i++ { + go func(tag int) { + o.SendCommand(CommandWrapper(func(*Object) error { + c <- tag + return nil + }), true) + }(i) + } + + go func() { + i := 0 + for { + i++ + if i%1000 == 0 { + runtime.Gosched() + } + } + }() + + slice := make([]int, 0, n*2) + for i := 0; i < n*2; i++ { + tag := <-c + slice = append(slice, tag) + } + if len(slice) != n*2 { + t.Fatal("Command be droped") + } + fmt.Println("TestSendCommand", slice) +} + +func TestSendCommandLoop(t *testing.T) { + n := 5 + m := n * 2 + opt := Options{ + Interval: time.Second, + MaxDone: n, + } + c := make(chan int) + o := NewObject(1, "test1", opt, nil) + o.Active() + for i := 0; i < n; i++ { + go func(tag int) { + o.SendCommand(CommandWrapper(func(oo *Object) error { + for j := 0; j < m; j++ { + func(tag2 int) { + oo.SendCommand(CommandWrapper(func(*Object) error { + c <- tag*1000 + tag2 + return nil + }), true) + }(j) + } + return nil + }), true) + }(i) + } + go func() { + i := 0 + for { + i++ + if i%1000 == 0 { + runtime.Gosched() + } + } + }() + slice := make([]int, 0, n*m) + for i := 0; i < n*m; i++ { + tag := <-c + slice = append(slice, tag) + } + if len(slice) != n*m { + t.Fatal("Command be droped") + } + fmt.Println("TestSendCommandLoop", slice, len(slice)) +} diff --git a/core/basic/objectlocalstorage.go b/core/basic/objectlocalstorage.go new file mode 100644 index 0000000..2972897 --- /dev/null +++ b/core/basic/objectlocalstorage.go @@ -0,0 +1,93 @@ +package basic + +import ( + "math" + "sync" + + "mongo.games.com/goserver/core/container" +) + +// Be similar to (Windows, Thread Local Storage) + +const OLS_MAX_SLOT uint = 64 +const OLS_INVALID_SLOT = math.MaxUint32 + +type OlsSlotCleanHandler func(interface{}) + +var objSlotFlag uint64 +var objSlotLock sync.Mutex +var objSlotCleanHandler [OLS_MAX_SLOT]OlsSlotCleanHandler +var objSlotHolder = container.NewSynchronizedMap() + +func OlsAlloc() uint { + objSlotLock.Lock() + for i := uint(0); i < 64; i++ { + if ((1 << i) & objSlotFlag) == 0 { + 
objSlotFlag |= (1 << i)
+			objSlotLock.Unlock()
+			return i
+		}
+	}
+	objSlotLock.Unlock()
+	return OLS_INVALID_SLOT
+}
+
+// OlsFree releases an allocated slot. Every object holding a value in that
+// slot has the value cleared and passed to the slot's clean handler.
+func OlsFree(slot uint) {
+	objSlotLock.Lock()
+	defer objSlotLock.Unlock()
+	if slot < OLS_MAX_SLOT {
+		handler := objSlotCleanHandler[slot]
+		flag := objSlotFlag & (1 << slot)
+		if handler != nil && flag != 0 {
+			objSlotFlag ^= (1 << slot)
+			objSlotHolder.Foreach(func(k, _ interface{}) {
+				if o, ok := k.(*Object); ok && o != nil {
+					val := o.ols[slot]
+					if val != nil {
+						o.ols[slot] = nil
+						handler(val)
+					}
+				}
+			})
+		}
+	}
+}
+
+func OlsInstallSlotCleanHandler(slot uint, handler OlsSlotCleanHandler) {
+	if slot < OLS_MAX_SLOT {
+		objSlotCleanHandler[slot] = handler
+	}
+}
+
+func (o *Object) OlsGetValue(slot uint) interface{} {
+	if slot < OLS_MAX_SLOT {
+		return o.ols[slot]
+	}
+	return nil
+}
+
+func (o *Object) OlsSetValue(slot uint, val interface{}) {
+	if slot < OLS_MAX_SLOT {
+		old := o.ols[slot]
+		o.ols[slot] = val
+		if old != nil {
+			handler := objSlotCleanHandler[slot]
+			if handler != nil {
+				handler(old)
+			}
+		}
+		objSlotHolder.Set(o, struct{}{})
+	}
+}
+
+// OlsClrValue releases every slot value held by this object, invoking the
+// registered clean handlers, and removes the object from the global holder
+// map so the object can be garbage collected after destruction.
+func (o *Object) OlsClrValue() {
+	for i := uint(0); i < OLS_MAX_SLOT; i++ {
+		v := o.ols[i]
+		if v != nil {
+			// Clear the slot before invoking the handler so re-entrant
+			// access does not observe a stale value.
+			o.ols[i] = nil
+			handler := objSlotCleanHandler[i]
+			if handler != nil {
+				handler(v)
+			}
+		}
+	}
+	// Drop the reference taken by OlsSetValue; without this, every Object
+	// that ever stored an OLS value is retained by objSlotHolder forever.
+	objSlotHolder.Delete(o)
+}
diff --git a/core/basic/objectmonitor.go b/core/basic/objectmonitor.go
new file mode 100644
index 0000000..16bb5b8
--- /dev/null
+++ b/core/basic/objectmonitor.go
@@ -0,0 +1,29 @@
+package basic
+
+var StatsWatchMgr IStatsWatchMgr
+
+type ObjectMonitor struct {
+}
+
+func (om *ObjectMonitor) OnStart(o *Object) {
+}
+
+func (om *ObjectMonitor) OnTick(o *Object) {
+}
+
+func (om *ObjectMonitor) OnStop(o *Object) {
+}
+
+type IStatsWatchMgr interface {
+	WatchStart(name string, elementype int) IStatsWatch
+}
+
+type IStatsWatch interface {
+	Stop()
+}
+
+type CmdStats struct {
+	PendingCnt int64
+	SendCmdCnt int64
+	RecvCmdCnt int64
+}
diff --git a/core/basic/options.go b/core/basic/options.go
new
file mode 100644 index 0000000..3e802f5 --- /dev/null +++ b/core/basic/options.go @@ -0,0 +1,19 @@ +package basic + +import ( + "time" +) + +const ( + QueueType_List int = iota + QueueType_Chan +) + +type Options struct { + // HeartBeat interval + Interval time.Duration + // The maximum number of processing each heartbeat + MaxDone int + // + QueueBacklog int +} diff --git a/core/basic/sinker.go b/core/basic/sinker.go new file mode 100644 index 0000000..8e90626 --- /dev/null +++ b/core/basic/sinker.go @@ -0,0 +1,7 @@ +package basic + +type Sinker interface { + OnStart() + OnTick() + OnStop() +} diff --git a/core/broker/broker.go b/core/broker/broker.go new file mode 100644 index 0000000..4095a3a --- /dev/null +++ b/core/broker/broker.go @@ -0,0 +1,39 @@ +// Package broker is an interface used for asynchronous messaging +package broker + +// Broker is an interface used for asynchronous messaging. +type Broker interface { + Init(...Option) error + Options() Options + Address() string + Connect() error + Disconnect() error + Publish(topic string, m *Message, opts ...PublishOption) error + Subscribe(topic string, h Handler, opts ...SubscribeOption) (Subscriber, error) + String() string +} + +// Handler is used to process messages via a subscription of a topic. +// The handler is passed a publication interface which contains the +// message and optional Ack method to acknowledge receipt of the message. 
+type Handler func(Event) error + +type Message struct { + Header map[string]string + Body []byte +} + +// Event is given to a subscription handler for processing +type Event interface { + Topic() string + Message() *Message + Ack() error + Error() error +} + +// Subscriber is a convenience return type for the Subscribe method +type Subscriber interface { + Options() SubscribeOptions + Topic() string + Unsubscribe() error +} diff --git a/core/broker/options.go b/core/broker/options.go new file mode 100644 index 0000000..c54ace5 --- /dev/null +++ b/core/broker/options.go @@ -0,0 +1,117 @@ +package broker + +import ( + "context" + "crypto/tls" +) + +type Options struct { + Addrs []string + Secure bool + + // Handler executed when error happens in broker mesage + // processing + ErrorHandler Handler + + TLSConfig *tls.Config + + // Other options for implementations of the interface + // can be stored in a context + Context context.Context +} + +type PublishOptions struct { + // Other options for implementations of the interface + // can be stored in a context + Context context.Context +} + +type SubscribeOptions struct { + // AutoAck defaults to true. When a handler returns + // with a nil error the message is acked. + AutoAck bool + // Subscribers with the same queue name + // will create a shared subscription where each + // receives a subset of messages. 
+ Queue string + + // Other options for implementations of the interface + // can be stored in a context + Context context.Context +} + +type Option func(*Options) + +type PublishOption func(*PublishOptions) + +// PublishContext set context +func PublishContext(ctx context.Context) PublishOption { + return func(o *PublishOptions) { + o.Context = ctx + } +} + +type SubscribeOption func(*SubscribeOptions) + +func NewSubscribeOptions(opts ...SubscribeOption) SubscribeOptions { + opt := SubscribeOptions{ + AutoAck: true, + } + + for _, o := range opts { + o(&opt) + } + + return opt +} + +// Addrs sets the host addresses to be used by the broker +func Addrs(addrs ...string) Option { + return func(o *Options) { + o.Addrs = addrs + } +} + +// DisableAutoAck will disable auto acking of messages +// after they have been handled. +func DisableAutoAck() SubscribeOption { + return func(o *SubscribeOptions) { + o.AutoAck = false + } +} + +// ErrorHandler will catch all broker errors that cant be handled +// in normal way, for example Codec errors +func ErrorHandler(h Handler) Option { + return func(o *Options) { + o.ErrorHandler = h + } +} + +// Queue sets the name of the queue to share messages on +func Queue(name string) SubscribeOption { + return func(o *SubscribeOptions) { + o.Queue = name + } +} + +// Secure communication with the broker +func Secure(b bool) Option { + return func(o *Options) { + o.Secure = b + } +} + +// Specify TLS Config +func TLSConfig(t *tls.Config) Option { + return func(o *Options) { + o.TLSConfig = t + } +} + +// SubscribeContext set context +func SubscribeContext(ctx context.Context) SubscribeOption { + return func(o *SubscribeOptions) { + o.Context = ctx + } +} diff --git a/core/broker/rabbitmq/auth.go b/core/broker/rabbitmq/auth.go new file mode 100644 index 0000000..c31f9a5 --- /dev/null +++ b/core/broker/rabbitmq/auth.go @@ -0,0 +1,12 @@ +package rabbitmq + +type ExternalAuthentication struct { +} + +func (auth *ExternalAuthentication) 
Mechanism() string { + return "EXTERNAL" +} + +func (auth *ExternalAuthentication) Response() string { + return "" +} diff --git a/core/broker/rabbitmq/channel.go b/core/broker/rabbitmq/channel.go new file mode 100644 index 0000000..c02f5e3 --- /dev/null +++ b/core/broker/rabbitmq/channel.go @@ -0,0 +1,143 @@ +package rabbitmq + +// +// All credit to Mondo +// + +import ( + "errors" + + "github.com/google/uuid" + "github.com/streadway/amqp" +) + +type rabbitMQChannel struct { + uuid string + connection *amqp.Connection + channel *amqp.Channel +} + +func newRabbitChannel(conn *amqp.Connection, prefetchCount int, prefetchGlobal bool) (*rabbitMQChannel, error) { + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + rabbitCh := &rabbitMQChannel{ + uuid: id.String(), + connection: conn, + } + if err := rabbitCh.Connect(prefetchCount, prefetchGlobal); err != nil { + return nil, err + } + return rabbitCh, nil + +} + +func (r *rabbitMQChannel) Connect(prefetchCount int, prefetchGlobal bool) error { + var err error + r.channel, err = r.connection.Channel() + if err != nil { + return err + } + err = r.channel.Qos(prefetchCount, 0, prefetchGlobal) + if err != nil { + return err + } + return nil +} + +func (r *rabbitMQChannel) Close() error { + if r.channel == nil { + return errors.New("Channel is nil") + } + return r.channel.Close() +} + +func (r *rabbitMQChannel) Publish(exchange, key string, message amqp.Publishing) error { + if r.channel == nil { + return errors.New("Channel is nil") + } + return r.channel.Publish(exchange, key, false, false, message) +} + +func (r *rabbitMQChannel) DeclareExchange(exchange string) error { + return r.channel.ExchangeDeclare( + exchange, // name + "topic", // kind + false, // durable + false, // autoDelete + false, // internal + false, // noWait + nil, // args + ) +} + +func (r *rabbitMQChannel) DeclareDurableExchange(exchange string) error { + return r.channel.ExchangeDeclare( + exchange, // name + "topic", // kind + 
true, // durable + false, // autoDelete + false, // internal + false, // noWait + nil, // args + ) +} + +func (r *rabbitMQChannel) DeclareQueue(queue string, args amqp.Table) error { + _, err := r.channel.QueueDeclare( + queue, // name + false, // durable + true, // autoDelete + false, // exclusive + false, // noWait + args, // args + ) + return err +} + +func (r *rabbitMQChannel) DeclareDurableQueue(queue string, args amqp.Table) error { + _, err := r.channel.QueueDeclare( + queue, // name + true, // durable + false, // autoDelete + false, // exclusive + false, // noWait + args, // args + ) + return err +} + +func (r *rabbitMQChannel) DeclareReplyQueue(queue string) error { + _, err := r.channel.QueueDeclare( + queue, // name + false, // durable + true, // autoDelete + true, // exclusive + false, // noWait + nil, // args + ) + return err +} + +func (r *rabbitMQChannel) ConsumeQueue(queue string, autoAck bool) (<-chan amqp.Delivery, error) { + return r.channel.Consume( + queue, // queue + r.uuid, // consumer + autoAck, // autoAck + false, // exclusive + false, // nolocal + false, // nowait + nil, // args + ) +} + +func (r *rabbitMQChannel) BindQueue(queue, key, exchange string, args amqp.Table) error { + return r.channel.QueueBind( + queue, // name + key, // key + exchange, // exchange + false, // noWait + args, // args + ) +} diff --git a/core/broker/rabbitmq/connection.go b/core/broker/rabbitmq/connection.go new file mode 100644 index 0000000..7d06e63 --- /dev/null +++ b/core/broker/rabbitmq/connection.go @@ -0,0 +1,252 @@ +package rabbitmq + +// +// All credit to Mondo +// + +import ( + "crypto/tls" + "regexp" + "strings" + "sync" + "time" + + "github.com/streadway/amqp" +) + +var ( + DefaultExchange = Exchange{ + Name: "idealeak", + } + DefaultRabbitURL = "amqp://guest:guest@127.0.0.1:5672" + DefaultPrefetchCount = 0 + DefaultPrefetchGlobal = false + DefaultRequeueOnError = false + + // The amqp library does not seem to set these when using amqp.DialConfig + // 
(even though it says so in the comments) so we set them manually to make + // sure to not brake any existing functionality + defaultHeartbeat = 10 * time.Second + defaultLocale = "en_US" + + defaultAmqpConfig = amqp.Config{ + Heartbeat: defaultHeartbeat, + Locale: defaultLocale, + } + + dial = amqp.Dial + dialTLS = amqp.DialTLS + dialConfig = amqp.DialConfig +) + +type rabbitMQConn struct { + Connection *amqp.Connection + Channel *rabbitMQChannel + ExchangeChannel *rabbitMQChannel + exchange Exchange + url string + prefetchCount int + prefetchGlobal bool + + sync.Mutex + connected bool + + close chan bool // 关闭信号 + waitConnection chan struct{} // 建立连接中 +} + +// Exchange is the rabbitmq exchange +type Exchange struct { + // Name of the exchange + Name string + // Whether its persistent + Durable bool +} + +func newRabbitMQConn(ex Exchange, urls []string, prefetchCount int, prefetchGlobal bool) *rabbitMQConn { + var url string + + if len(urls) > 0 && regexp.MustCompile("^amqp(s)?://.*").MatchString(urls[0]) { + url = urls[0] + } else { + url = DefaultRabbitURL + } + + ret := &rabbitMQConn{ + exchange: ex, + url: url, + prefetchCount: prefetchCount, + prefetchGlobal: prefetchGlobal, + close: make(chan bool), + waitConnection: make(chan struct{}), + } + // its bad case of nil == waitConnection, so close it at start + close(ret.waitConnection) + return ret +} + +func (r *rabbitMQConn) connect(secure bool, config *amqp.Config) error { + // try connect + if err := r.tryConnect(secure, config); err != nil { + return err + } + + // connected + r.Lock() + r.connected = true + r.Unlock() + + // create reconnect loop + go r.reconnect(secure, config) + return nil +} + +func (r *rabbitMQConn) reconnect(secure bool, config *amqp.Config) { + // skip first connect + var connect bool + + for { + if connect { + // try reconnect + if err := r.tryConnect(secure, config); err != nil { + time.Sleep(1 * time.Second) + continue + } + + // connected + r.Lock() + r.connected = true + 
r.Unlock() + //unblock resubscribe cycle - close channel + //at this point channel is created and unclosed - close it without any additional checks + close(r.waitConnection) + } + + connect = true + notifyClose := make(chan *amqp.Error) + r.Connection.NotifyClose(notifyClose) + + // block until closed + select { + case <-notifyClose: + // block all resubscribe attempt - they are useless because there is no connection to rabbitmq + // create channel 'waitConnection' (at this point channel is nil or closed, create it without unnecessary checks) + r.Lock() + r.connected = false + r.waitConnection = make(chan struct{}) + r.Unlock() + case <-r.close: + return + } + } +} + +func (r *rabbitMQConn) Connect(secure bool, config *amqp.Config) error { + r.Lock() + + // already connected + if r.connected { + r.Unlock() + return nil + } + + // check it was closed + select { + case <-r.close: + r.close = make(chan bool) + default: + // no op + // new conn + } + + r.Unlock() + + return r.connect(secure, config) +} + +func (r *rabbitMQConn) Close() error { + r.Lock() + defer r.Unlock() + + select { + case <-r.close: + return nil + default: + close(r.close) + r.connected = false + } + + return r.Connection.Close() +} + +func (r *rabbitMQConn) tryConnect(secure bool, config *amqp.Config) error { + var err error + + if config == nil { + config = &defaultAmqpConfig + } + + url := r.url + + if secure || config.TLSClientConfig != nil || strings.HasPrefix(r.url, "amqps://") { + if config.TLSClientConfig == nil { + config.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + + url = strings.Replace(r.url, "amqp://", "amqps://", 1) + } + + r.Connection, err = dialConfig(url, *config) + + if err != nil { + return err + } + + if r.Channel, err = newRabbitChannel(r.Connection, r.prefetchCount, r.prefetchGlobal); err != nil { + return err + } + + if r.exchange.Durable { + r.Channel.DeclareDurableExchange(r.exchange.Name) + } else { + r.Channel.DeclareExchange(r.exchange.Name) + } 
+ r.ExchangeChannel, err = newRabbitChannel(r.Connection, r.prefetchCount, r.prefetchGlobal) + + return err +} + +func (r *rabbitMQConn) Consume(queue, key string, headers amqp.Table, qArgs amqp.Table, autoAck, durableQueue bool) (*rabbitMQChannel, <-chan amqp.Delivery, error) { + consumerChannel, err := newRabbitChannel(r.Connection, r.prefetchCount, r.prefetchGlobal) + if err != nil { + return nil, nil, err + } + + if durableQueue { + err = consumerChannel.DeclareDurableQueue(queue, qArgs) + } else { + err = consumerChannel.DeclareQueue(queue, qArgs) + } + + if err != nil { + return nil, nil, err + } + + deliveries, err := consumerChannel.ConsumeQueue(queue, autoAck) + if err != nil { + return nil, nil, err + } + + err = consumerChannel.BindQueue(queue, key, r.exchange.Name, headers) + if err != nil { + return nil, nil, err + } + + return consumerChannel, deliveries, nil +} + +func (r *rabbitMQConn) Publish(exchange, key string, msg amqp.Publishing) error { + return r.ExchangeChannel.Publish(exchange, key, msg) +} diff --git a/core/broker/rabbitmq/connection_test.go b/core/broker/rabbitmq/connection_test.go new file mode 100644 index 0000000..3201fb9 --- /dev/null +++ b/core/broker/rabbitmq/connection_test.go @@ -0,0 +1,106 @@ +package rabbitmq + +import ( + "crypto/tls" + "errors" + "testing" + + "github.com/streadway/amqp" +) + +func TestNewRabbitMQConnURL(t *testing.T) { + testcases := []struct { + title string + urls []string + want string + }{ + {"Multiple URLs", []string{"amqp://example.com/one", "amqp://example.com/two"}, "amqp://example.com/one"}, + {"Insecure URL", []string{"amqp://example.com"}, "amqp://example.com"}, + {"Secure URL", []string{"amqps://example.com"}, "amqps://example.com"}, + {"Invalid URL", []string{"http://example.com"}, DefaultRabbitURL}, + {"No URLs", []string{}, DefaultRabbitURL}, + } + + for _, test := range testcases { + conn := newRabbitMQConn(Exchange{Name: "exchange"}, test.urls, 0, false) + + if have, want := conn.url, 
test.want; have != want { + t.Errorf("%s: invalid url, want %q, have %q", test.title, want, have) + } + } +} + +func TestTryToConnectTLS(t *testing.T) { + var ( + dialCount, dialTLSCount int + + err = errors.New("stop connect here") + ) + + dialConfig = func(_ string, c amqp.Config) (*amqp.Connection, error) { + + if c.TLSClientConfig != nil { + dialTLSCount++ + return nil, err + } + + dialCount++ + return nil, err + } + + testcases := []struct { + title string + url string + secure bool + amqpConfig *amqp.Config + wantTLS bool + }{ + {"unsecure url, secure false, no tls config", "amqp://example.com", false, nil, false}, + {"secure url, secure false, no tls config", "amqps://example.com", false, nil, true}, + {"unsecure url, secure true, no tls config", "amqp://example.com", true, nil, true}, + {"unsecure url, secure false, tls config", "amqp://example.com", false, &amqp.Config{TLSClientConfig: &tls.Config{}}, true}, + } + + for _, test := range testcases { + dialCount, dialTLSCount = 0, 0 + + conn := newRabbitMQConn(Exchange{Name: "exchange"}, []string{test.url}, 0, false) + conn.tryConnect(test.secure, test.amqpConfig) + + have := dialCount + if test.wantTLS { + have = dialTLSCount + } + + if have != 1 { + t.Errorf("%s: used wrong dialer, Dial called %d times, DialTLS called %d times", test.title, dialCount, dialTLSCount) + } + } +} + +func TestNewRabbitMQPrefetch(t *testing.T) { + testcases := []struct { + title string + urls []string + prefetchCount int + prefetchGlobal bool + }{ + {"Multiple URLs", []string{"amqp://example.com/one", "amqp://example.com/two"}, 1, true}, + {"Insecure URL", []string{"amqp://example.com"}, 1, true}, + {"Secure URL", []string{"amqps://example.com"}, 1, true}, + {"Invalid URL", []string{"http://example.com"}, 1, true}, + {"No URLs", []string{}, 1, true}, + } + + for _, test := range testcases { + conn := newRabbitMQConn(Exchange{Name: "exchange"}, test.urls, test.prefetchCount, test.prefetchGlobal) + + if have, want := 
conn.prefetchCount, test.prefetchCount; have != want { + t.Errorf("%s: invalid prefetch count, want %d, have %d", test.title, want, have) + } + + if have, want := conn.prefetchGlobal, test.prefetchGlobal; have != want { + t.Errorf("%s: invalid prefetch global setting, want %t, have %t", test.title, want, have) + } + } +} diff --git a/core/broker/rabbitmq/context.go b/core/broker/rabbitmq/context.go new file mode 100644 index 0000000..a0f0030 --- /dev/null +++ b/core/broker/rabbitmq/context.go @@ -0,0 +1,37 @@ +package rabbitmq + +import ( + "context" + + "mongo.games.com/goserver/core/broker" +) + +// setSubscribeOption returns a function to setup a context with given value +func setSubscribeOption(k, v interface{}) broker.SubscribeOption { + return func(o *broker.SubscribeOptions) { + if o.Context == nil { + o.Context = context.Background() + } + o.Context = context.WithValue(o.Context, k, v) + } +} + +// setBrokerOption returns a function to setup a context with given value +func setBrokerOption(k, v interface{}) broker.Option { + return func(o *broker.Options) { + if o.Context == nil { + o.Context = context.Background() + } + o.Context = context.WithValue(o.Context, k, v) + } +} + +// setPublishOption returns a function to setup a context with given value +func setPublishOption(k, v interface{}) broker.PublishOption { + return func(o *broker.PublishOptions) { + if o.Context == nil { + o.Context = context.Background() + } + o.Context = context.WithValue(o.Context, k, v) + } +} diff --git a/core/broker/rabbitmq/options.go b/core/broker/rabbitmq/options.go new file mode 100644 index 0000000..380294c --- /dev/null +++ b/core/broker/rabbitmq/options.go @@ -0,0 +1,87 @@ +package rabbitmq + +import ( + "context" + + "mongo.games.com/goserver/core/broker" +) + +type durableQueueKey struct{} +type headersKey struct{} +type queueArgumentsKey struct{} +type prefetchCountKey struct{} +type prefetchGlobalKey struct{} +type exchangeKey struct{} +type requeueOnErrorKey 
struct{} +type deliveryMode struct{} +type priorityKey struct{} +type externalAuth struct{} +type durableExchange struct{} + +// DurableQueue creates a durable queue when subscribing. +func DurableQueue() broker.SubscribeOption { + return setSubscribeOption(durableQueueKey{}, true) +} + +// DurableExchange is an option to set the Exchange to be durable +func DurableExchange() broker.Option { + return setBrokerOption(durableExchange{}, true) +} + +// Headers adds headers used by the headers exchange +func Headers(h map[string]interface{}) broker.SubscribeOption { + return setSubscribeOption(headersKey{}, h) +} + +// QueueArguments sets arguments for queue creation +func QueueArguments(h map[string]interface{}) broker.SubscribeOption { + return setSubscribeOption(queueArgumentsKey{}, h) +} + +// RequeueOnError calls Nack(muliple:false, requeue:true) on amqp delivery when handler returns error +func RequeueOnError() broker.SubscribeOption { + return setSubscribeOption(requeueOnErrorKey{}, true) +} + +// ExchangeName is an option to set the ExchangeName +func ExchangeName(e string) broker.Option { + return setBrokerOption(exchangeKey{}, e) +} + +// PrefetchCount ... +func PrefetchCount(c int) broker.Option { + return setBrokerOption(prefetchCountKey{}, c) +} + +// PrefetchGlobal creates a durable queue when subscribing. 
+func PrefetchGlobal() broker.Option { + return setBrokerOption(prefetchGlobalKey{}, true) +} + +// DeliveryMode sets a delivery mode for publishing +func DeliveryMode(value uint8) broker.PublishOption { + return setPublishOption(deliveryMode{}, value) +} + +// Priority sets a priority level for publishing +func Priority(value uint8) broker.PublishOption { + return setPublishOption(priorityKey{}, value) +} + +func ExternalAuth() broker.Option { + return setBrokerOption(externalAuth{}, ExternalAuthentication{}) +} + +type subscribeContextKey struct{} + +// SubscribeContext set the context for broker.SubscribeOption +func SubscribeContext(ctx context.Context) broker.SubscribeOption { + return setSubscribeOption(subscribeContextKey{}, ctx) +} + +type ackSuccessKey struct{} + +// AckOnSuccess will automatically acknowledge messages when no error is returned +func AckOnSuccess() broker.SubscribeOption { + return setSubscribeOption(ackSuccessKey{}, true) +} diff --git a/core/broker/rabbitmq/rabbitmq.go b/core/broker/rabbitmq/rabbitmq.go new file mode 100644 index 0000000..b79b7cf --- /dev/null +++ b/core/broker/rabbitmq/rabbitmq.go @@ -0,0 +1,335 @@ +// Package rabbitmq provides a RabbitMQ broker +package rabbitmq + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/streadway/amqp" + "mongo.games.com/goserver/core/broker" +) + +type rbroker struct { + conn *rabbitMQConn + addrs []string + opts broker.Options + prefetchCount int + prefetchGlobal bool + mtx sync.Mutex + wg sync.WaitGroup +} + +type subscriber struct { + mtx sync.Mutex + mayRun bool + opts broker.SubscribeOptions + topic string + ch *rabbitMQChannel + durableQueue bool + queueArgs map[string]interface{} + r *rbroker + fn func(msg amqp.Delivery) + headers map[string]interface{} +} + +type publication struct { + d amqp.Delivery + m *broker.Message + t string + err error +} + +func (p *publication) Ack() error { + return p.d.Ack(false) +} + +func (p *publication) Error() error { + return p.err 
+} + +func (p *publication) Topic() string { + return p.t +} + +func (p *publication) Message() *broker.Message { + return p.m +} + +func (s *subscriber) Options() broker.SubscribeOptions { + return s.opts +} + +func (s *subscriber) Topic() string { + return s.topic +} + +func (s *subscriber) Unsubscribe() error { + s.mtx.Lock() + defer s.mtx.Unlock() + s.mayRun = false + if s.ch != nil { + return s.ch.Close() + } + return nil +} + +func (s *subscriber) resubscribe() { + minResubscribeDelay := 100 * time.Millisecond + maxResubscribeDelay := 30 * time.Second + expFactor := time.Duration(2) + reSubscribeDelay := minResubscribeDelay + //loop until unsubscribe + for { + s.mtx.Lock() + mayRun := s.mayRun + s.mtx.Unlock() + if !mayRun { + // we are unsubscribed, shut down the routine + return + } + + select { + //check shutdown case + case <-s.r.conn.close: + //yep, it's the shutdown case + return + //wait until we reconnect to rabbit + case <-s.r.conn.waitConnection: + } + + // it may crash (panic) in case of Consume without connection, so recheck it + s.r.mtx.Lock() + if !s.r.conn.connected { + s.r.mtx.Unlock() + continue + } + + ch, sub, err := s.r.conn.Consume( + s.opts.Queue, + s.topic, + s.headers, + s.queueArgs, + s.opts.AutoAck, + s.durableQueue, + ) + + s.r.mtx.Unlock() + switch err { + case nil: + reSubscribeDelay = minResubscribeDelay + s.mtx.Lock() + s.ch = ch + s.mtx.Unlock() + default: + if reSubscribeDelay > maxResubscribeDelay { + reSubscribeDelay = maxResubscribeDelay + } + time.Sleep(reSubscribeDelay) + reSubscribeDelay *= expFactor + continue + } + for d := range sub { + s.r.wg.Add(1) + s.fn(d) + s.r.wg.Done() + } + } +} + +func (r *rbroker) Publish(topic string, msg *broker.Message, opts ...broker.PublishOption) error { + m := amqp.Publishing{ + Body: msg.Body, + Headers: amqp.Table{}, + } + + options := broker.PublishOptions{} + for _, o := range opts { + o(&options) + } + + if options.Context != nil { + if value, ok :=
options.Context.Value(deliveryMode{}).(uint8); ok { + m.DeliveryMode = value + } + + if value, ok := options.Context.Value(priorityKey{}).(uint8); ok { + m.Priority = value + } + } + + for k, v := range msg.Header { + m.Headers[k] = v + } + + if r.conn == nil { + return errors.New("connection is nil") + } + + return r.conn.Publish(r.conn.exchange.Name, topic, m) +} + +func (r *rbroker) Subscribe(topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) { + var ackSuccess bool + + if r.conn == nil { + return nil, errors.New("not connected") + } + + opt := broker.SubscribeOptions{ + AutoAck: true, + } + + for _, o := range opts { + o(&opt) + } + + // Make sure context is setup + if opt.Context == nil { + opt.Context = context.Background() + } + + ctx := opt.Context + if subscribeContext, ok := ctx.Value(subscribeContextKey{}).(context.Context); ok && subscribeContext != nil { + ctx = subscribeContext + } + + var requeueOnError bool + requeueOnError, _ = ctx.Value(requeueOnErrorKey{}).(bool) + + var durableQueue bool + durableQueue, _ = ctx.Value(durableQueueKey{}).(bool) + + var qArgs map[string]interface{} + if qa, ok := ctx.Value(queueArgumentsKey{}).(map[string]interface{}); ok { + qArgs = qa + } + + var headers map[string]interface{} + if h, ok := ctx.Value(headersKey{}).(map[string]interface{}); ok { + headers = h + } + + if bval, ok := ctx.Value(ackSuccessKey{}).(bool); ok && bval { + opt.AutoAck = false + ackSuccess = true + } + + fn := func(msg amqp.Delivery) { + header := make(map[string]string) + for k, v := range msg.Headers { + header[k], _ = v.(string) + } + m := &broker.Message{ + Header: header, + Body: msg.Body, + } + p := &publication{d: msg, m: m, t: msg.RoutingKey} + p.err = handler(p) + if p.err == nil && ackSuccess && !opt.AutoAck { + msg.Ack(false) + } else if p.err != nil && !opt.AutoAck { + msg.Nack(false, requeueOnError) + } + } + + sret := &subscriber{topic: topic, opts: opt, mayRun: true, r: r, + 
durableQueue: durableQueue, fn: fn, headers: headers, queueArgs: qArgs} + + go sret.resubscribe() + + return sret, nil +} + +func (r *rbroker) Options() broker.Options { + return r.opts +} + +func (r *rbroker) String() string { + return "rabbitmq" +} + +func (r *rbroker) Address() string { + if len(r.addrs) > 0 { + return r.addrs[0] + } + return "" +} + +func (r *rbroker) Init(opts ...broker.Option) error { + for _, o := range opts { + o(&r.opts) + } + r.addrs = r.opts.Addrs + return nil +} + +func (r *rbroker) Connect() error { + if r.conn == nil { + r.conn = newRabbitMQConn(r.getExchange(), r.opts.Addrs, r.getPrefetchCount(), r.getPrefetchGlobal()) + } + + conf := defaultAmqpConfig + + if auth, ok := r.opts.Context.Value(externalAuth{}).(ExternalAuthentication); ok { + conf.SASL = []amqp.Authentication{&auth} + } + + conf.TLSClientConfig = r.opts.TLSConfig + + return r.conn.Connect(r.opts.Secure, &conf) +} + +func (r *rbroker) Disconnect() error { + if r.conn == nil { + return errors.New("connection is nil") + } + ret := r.conn.Close() + r.wg.Wait() // wait all goroutines + return ret +} + +func NewBroker(opts ...broker.Option) broker.Broker { + options := broker.Options{ + Context: context.Background(), + } + + for _, o := range opts { + o(&options) + } + + return &rbroker{ + addrs: options.Addrs, + opts: options, + } +} + +func (r *rbroker) getExchange() Exchange { + + ex := DefaultExchange + + if e, ok := r.opts.Context.Value(exchangeKey{}).(string); ok { + ex.Name = e + } + + if d, ok := r.opts.Context.Value(durableExchange{}).(bool); ok { + ex.Durable = d + } + + return ex +} + +func (r *rbroker) getPrefetchCount() int { + if e, ok := r.opts.Context.Value(prefetchCountKey{}).(int); ok { + return e + } + return DefaultPrefetchCount +} + +func (r *rbroker) getPrefetchGlobal() bool { + if e, ok := r.opts.Context.Value(prefetchGlobalKey{}).(bool); ok { + return e + } + return DefaultPrefetchGlobal +} diff --git a/core/broker/rabbitmq/rabbitmq_test.go 
b/core/broker/rabbitmq/rabbitmq_test.go new file mode 100644 index 0000000..af23c7b --- /dev/null +++ b/core/broker/rabbitmq/rabbitmq_test.go @@ -0,0 +1,37 @@ +package rabbitmq_test + +import ( + "fmt" + "os" + "testing" + + "mongo.games.com/goserver/core/broker" + "mongo.games.com/goserver/core/broker/rabbitmq" +) + +func MyHandler(e broker.Event) error { + fmt.Println(e.Topic(), ":", e.Message()) + return nil +} + +func TestDurable(t *testing.T) { + if tr := os.Getenv("TRAVIS"); len(tr) > 0 { + t.Skip() + } + rabbitmq.DefaultRabbitURL = "amqp://win88:123456@192.168.1.230:5672/win88" + + b := rabbitmq.NewBroker() + b.Init() + if err := b.Connect(); err != nil { + t.Logf("cant conect to broker, skip: %v", err) + t.Skip() + } + + b.Subscribe("test", MyHandler, broker.Queue("queue.default"), + broker.DisableAutoAck(), + rabbitmq.DurableQueue()) + + for i := 0; i < 100; i++ { + b.Publish("test", &broker.Message{Body: []byte("hello")}) + } +} diff --git a/core/build.go b/core/build.go new file mode 100644 index 0000000..ba5a431 --- /dev/null +++ b/core/build.go @@ -0,0 +1,5 @@ +package core + +func BuildTime() string { + return "" //buildTime() +} diff --git a/core/build_darwin.go b/core/build_darwin.go new file mode 100644 index 0000000..db475fd --- /dev/null +++ b/core/build_darwin.go @@ -0,0 +1,18 @@ +package core + +/* +const char* build_time(void) +{ + static const char* psz_build_time = "["__DATE__ " " __TIME__ "]"; + return psz_build_time; +} +*/ +import "C" + +var ( + _linux_buildTime = C.GoString(C.build_time()) +) + +func buildTime() string { + return _linux_buildTime +} diff --git a/core/build_linux.go b/core/build_linux.go new file mode 100644 index 0000000..db475fd --- /dev/null +++ b/core/build_linux.go @@ -0,0 +1,18 @@ +package core + +/* +const char* build_time(void) +{ + static const char* psz_build_time = "["__DATE__ " " __TIME__ "]"; + return psz_build_time; +} +*/ +import "C" + +var ( + _linux_buildTime = C.GoString(C.build_time()) +) + +func 
buildTime() string { + return _linux_buildTime +} diff --git a/core/build_windows.go b/core/build_windows.go new file mode 100644 index 0000000..4ead3a8 --- /dev/null +++ b/core/build_windows.go @@ -0,0 +1,10 @@ +package core + +import ( + "fmt" + "time" +) + +func buildTime() string { + return fmt.Sprintf("[%s]", time.Now().String()) +} diff --git a/core/buildall.bat b/core/buildall.bat new file mode 100644 index 0000000..7c7a002 --- /dev/null +++ b/core/buildall.bat @@ -0,0 +1,34 @@ +go build +cd admin +go build +cd ../basic +go build +cd ../builtin/action +go build +cd ../filter +go build +cd ../protocol +go build +cd ../../cmdline +go build +cd ../logger +go build +cd ../module +go build +cd ../netlib +go build +cd ../profile +go build +cd ../schedule +go build +cd ../signal +go build +cd ../task +go build +cd ../timer +go build +cd ../transact +go build +cd ../utils +go build +pause diff --git a/core/builtin/action/packetslices.go b/core/builtin/action/packetslices.go new file mode 100644 index 0000000..d372546 --- /dev/null +++ b/core/builtin/action/packetslices.go @@ -0,0 +1,106 @@ +package action + +import ( + "bytes" + "errors" + "strconv" + + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/netlib" +) + +var ( + SessionAttributeBigBuf = &PacketSlicesHandler{} +) + +type PacketSlicesPacketFactory struct { +} + +type PacketSlicesHandler struct { +} + +func (this *PacketSlicesPacketFactory) CreatePacket() interface{} { + pack := &protocol.SSPacketSlices{} + return pack +} + +func (this *PacketSlicesHandler) Process(s *netlib.Session, packetid int, data interface{}) error { + if packetslices, ok := data.(*protocol.SSPacketSlices); ok { + seqNo := int(packetslices.GetSeqNo()) + if seqNo < 1 { + return errors.New("PacketSlicesHandler unexpect packet seq:" + strconv.Itoa(seqNo)) + } + totalSize := int(packetslices.GetTotalSize()) + if totalSize > s.GetSessionConfig().MaxPacket { + return errors.New("PacketSlicesHandler exceed 
MaxPacket size:" + strconv.Itoa(s.GetSessionConfig().MaxPacket) + " size=" + strconv.Itoa(totalSize)) + } + attr := s.GetAttribute(SessionAttributeBigBuf) + if seqNo == 1 { + if attr == nil { + attr = bytes.NewBuffer(make([]byte, 0, packetslices.GetTotalSize())) + s.SetAttribute(SessionAttributeBigBuf, attr) + } + } + if seqNo > 1 { + if attr == nil { + return errors.New("PacketSlicesHandler Incorrect packet seq, expect seq=1") + } + } else if attr == nil { + return errors.New("PacketSlicesHandler get bytesbuf failed") + } + + buf := attr.(*bytes.Buffer) + if seqNo == 1 { + buf.Reset() + } + if buf.Len() != int(packetslices.GetOffset()) { + return errors.New("PacketSlicesHandler get next packet offset error") + } + buf.Write(packetslices.GetPacketData()) + if buf.Len() == totalSize { + packetid, pck, err := netlib.UnmarshalPacket(buf.Bytes()) + if err != nil { + return err + } + h := netlib.GetHandler(packetid) + if h != nil { + h.Process(s, packetid, pck) + } + } + } + return nil +} + +func init() { + netlib.RegisterHandler(int(protocol.CoreBuiltinPacketID_PACKET_SS_SLICES), &PacketSlicesHandler{}) + netlib.RegisterFactory(int(protocol.CoreBuiltinPacketID_PACKET_SS_SLICES), &PacketSlicesPacketFactory{}) + + netlib.DefaultBuiltinProtocolEncoder.PacketCutor = func(data []byte) (packid int, packs []interface{}) { + + var ( + offset = 0 + sendSize = 0 + seqNo = 1 + totalSize = len(data) + restSize = len(data) + ) + packid = int(protocol.CoreBuiltinPacketID_PACKET_SS_SLICES) + for restSize > 0 { + sendSize = restSize + if sendSize > netlib.MaxPacketSize-128 { + sendSize = netlib.MaxPacketSize - 128 + } + pack := &protocol.SSPacketSlices{ + SeqNo: int32(seqNo), + TotalSize: int32(totalSize), + Offset: int32(offset), + PacketData: data[offset : offset+sendSize], + } + seqNo++ + restSize -= sendSize + offset += sendSize + packs = append(packs, pack) + } + return + } +} diff --git a/core/builtin/action/txctrlcmd.go b/core/builtin/action/txctrlcmd.go new file mode 100644 
index 0000000..6c90898 --- /dev/null +++ b/core/builtin/action/txctrlcmd.go @@ -0,0 +1,45 @@ +package action + +import ( + "errors" + "strconv" + + "google.golang.org/protobuf/proto" + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/core/transact" +) + +type TxCtrlCmdPacketFactory struct { +} + +type TxCtrlCmdHandler struct { +} + +func (this *TxCtrlCmdPacketFactory) CreatePacket() interface{} { + pack := &protocol.TransactCtrlCmd{} + return pack +} + +func (this *TxCtrlCmdHandler) Process(session *netlib.Session, packetid int, data interface{}) error { + //logger.Logger.Trace("TxCtrlCmdHandler.Process") + if txcmd, ok := data.(*protocol.TransactCtrlCmd); ok { + if !transact.ProcessTransCmd(transact.TransNodeID(txcmd.GetTId()), transact.TransCmd(txcmd.GetCmd())) { + return errors.New("TxCtrlCmdHandler error, tid=" + strconv.FormatInt(txcmd.GetTId(), 16) + " cmd=" + strconv.Itoa(int(txcmd.GetCmd()))) + } + } + return nil +} + +func init() { + netlib.RegisterHandler(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_CMD), &TxCtrlCmdHandler{}) + netlib.RegisterFactory(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_CMD), &TxCtrlCmdPacketFactory{}) +} + +func ConstructTxCmdPacket(tnp *transact.TransNodeParam, cmd transact.TransCmd) proto.Message { + packet := &protocol.TransactCtrlCmd{ + TId: int64(tnp.TId), + Cmd: int32(cmd), + } + return packet +} diff --git a/core/builtin/action/txresult.go b/core/builtin/action/txresult.go new file mode 100644 index 0000000..cae6720 --- /dev/null +++ b/core/builtin/action/txresult.go @@ -0,0 +1,55 @@ +package action + +import ( + "errors" + "strconv" + + "google.golang.org/protobuf/proto" + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/core/transact" +) + +type TxResultPacketFactory struct { +} + +type TxResultHandler struct { +} + +func (this 
*TxResultPacketFactory) CreatePacket() interface{} { + pack := &protocol.TransactResult{} + return pack +} + +func (this *TxResultHandler) Process(session *netlib.Session, packetid int, data interface{}) error { + //logger.Logger.Trace("TxResultHandler.Process") + if tr, ok := data.(*protocol.TransactResult); ok { + if !transact.ProcessTransResult(transact.TransNodeID(tr.GetMyTId()), transact.TransNodeID(tr.GetChildTId()), int(tr.GetRetCode()), tr.GetCustomData()) { + return errors.New("TxResultHandler error, tid=" + strconv.FormatInt(tr.GetMyTId(), 16)) + } + } + return nil +} + +func init() { + netlib.RegisterHandler(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_RESULT), &TxResultHandler{}) + netlib.RegisterFactory(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_RESULT), &TxResultPacketFactory{}) +} + +func ContructTxResultPacket(parent, me *transact.TransNodeParam, tr *transact.TransResult) proto.Message { + packet := &protocol.TransactResult{ + MyTId: int64(parent.TId), + ChildTId: int64(me.TId), + RetCode: int32(tr.RetCode), + } + if tr.RetFiels != nil { + b, err := netlib.MarshalPacketNoPackId(tr.RetFiels) + if err != nil { + logger.Logger.Warn("ContructTxResultPacket Marshal UserData error:", err) + } else { + packet.CustomData = b + } + } + return packet +} diff --git a/core/builtin/action/txstart.go b/core/builtin/action/txstart.go new file mode 100644 index 0000000..edfc607 --- /dev/null +++ b/core/builtin/action/txstart.go @@ -0,0 +1,110 @@ +package action + +import ( + "errors" + "strconv" + "time" + + "google.golang.org/protobuf/proto" + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/core/transact" +) + +type TxStartPacketFactory struct { +} + +type TxStartHandler struct { +} + +func (this *TxStartPacketFactory) CreatePacket() interface{} { + pack := &protocol.TransactStart{} + return pack +} + +func (this *TxStartHandler) Process(session 
*netlib.Session, packetid int, data interface{}) error { + //logger.Logger.Trace("TxStartHandler.Process") + if ts, ok := data.(*protocol.TransactStart); ok { + netptnp := ts.GetParenTNP() + if netptnp == nil { + return nil + } + netmtnp := ts.GetMyTNP() + if netmtnp == nil { + return nil + } + + ptnp := &transact.TransNodeParam{ + TId: transact.TransNodeID(netptnp.GetTransNodeID()), + Tt: transact.TransType(netptnp.GetTransType()), + Ot: transact.TransOwnerType(netptnp.GetOwnerType()), + Tct: transact.TransactCommitPolicy(netptnp.GetTransCommitType()), + Oid: int(netptnp.GetOwnerID()), + SkeletonID: int(netptnp.GetSkeletonID()), + LevelNo: int(netptnp.GetLevelNo()), + AreaID: int(netptnp.GetAreaID()), + TimeOut: time.Duration(netptnp.GetTimeOut()), + ExpiresTs: netptnp.GetExpiresTs(), + } + mtnp := &transact.TransNodeParam{ + TId: transact.TransNodeID(netmtnp.GetTransNodeID()), + Tt: transact.TransType(netmtnp.GetTransType()), + Ot: transact.TransOwnerType(netmtnp.GetOwnerType()), + Tct: transact.TransactCommitPolicy(netmtnp.GetTransCommitType()), + Oid: int(netmtnp.GetOwnerID()), + SkeletonID: int(netmtnp.GetSkeletonID()), + LevelNo: int(netmtnp.GetLevelNo()), + AreaID: int(netmtnp.GetAreaID()), + TimeOut: time.Duration(netmtnp.GetTimeOut()), + ExpiresTs: netmtnp.GetExpiresTs(), + } + + if !transact.ProcessTransStart(ptnp, mtnp, ts.GetCustomData(), mtnp.TimeOut) { + return errors.New("TxStartHandler error, tid=" + strconv.FormatInt(netmtnp.GetTransNodeID(), 16)) + } + } + return nil +} + +func init() { + netlib.RegisterHandler(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_START), &TxStartHandler{}) + netlib.RegisterFactory(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_START), &TxStartPacketFactory{}) +} + +func ContructTxStartPacket(parent, me *transact.TransNodeParam, ud interface{}) proto.Message { + packet := &protocol.TransactStart{ + MyTNP: &protocol.TransactParam{ + TransNodeID: int64(me.TId), + TransType: int32(me.Tt), + OwnerType: int32(me.Ot), + 
TransCommitType: int32(me.Tct), + OwnerID: int32(me.Oid), + SkeletonID: int32(me.SkeletonID), + LevelNo: int32(me.LevelNo), + AreaID: int32(me.AreaID), + TimeOut: int64(me.TimeOut), + }, + ParenTNP: &protocol.TransactParam{ + TransNodeID: int64(parent.TId), + TransType: int32(parent.Tt), + OwnerType: int32(parent.Ot), + TransCommitType: int32(parent.Tct), + OwnerID: int32(parent.Oid), + SkeletonID: int32(parent.SkeletonID), + LevelNo: int32(parent.LevelNo), + AreaID: int32(parent.AreaID), + TimeOut: int64(parent.TimeOut), + }, + } + + if ud != nil { + b, err := netlib.MarshalPacketNoPackId(ud) + if err != nil { + logger.Logger.Warn("ContructTxStartPacket Marshal UserData error:", err) + } else { + packet.CustomData = b + } + } + return packet +} diff --git a/core/builtin/filter/authentication.go b/core/builtin/filter/authentication.go new file mode 100644 index 0000000..af22e22 --- /dev/null +++ b/core/builtin/filter/authentication.go @@ -0,0 +1,103 @@ +package filter + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "time" + + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" +) + +var ( + AuthenticationFilterName = "session-filter-auth" + SessionAttributeAuth = &AuthenticationFilter{} +) + +type AuthenticationHandler func(s *netlib.Session, bSuc bool) +type AuthenticationFilter struct { + SessionAuthHandler AuthenticationHandler +} + +func (af *AuthenticationFilter) GetName() string { + return AuthenticationFilterName +} + +func (af *AuthenticationFilter) GetInterestOps() uint { + return 1< time.Second { + if sft.recvCntPerSec > sft.maxRecvPerSec { + sft.maxRecvPerSec = sft.recvCntPerSec + sft.recvCntPerSec = 0 + sft.recvTime = time.Now() + } + } + sft.dump() + sft.Unlock() + return true +} + +func (sft *SessionFilterTrace) OnPacketSent(s *netlib.Session, packetid int, logicNo uint32, data []byte) bool { + //logger.Logger.Tracef("SessionFilterTrace.OnPacketSent sid=%v packetid:%v 
logicNo:%v size=%d", s.Id, packetid, logicNo, len(data)) + sft.Lock() + sft.sendCntPerSec++ + if time.Now().Sub(sft.sendTime) > time.Second { + if sft.sendCntPerSec > sft.maxSendPerSec { + sft.maxSendPerSec = sft.sendCntPerSec + sft.sendCntPerSec = 0 + sft.sendTime = time.Now() + } + } + sft.dump() + sft.Unlock() + return true +} + +func (sft *SessionFilterTrace) dump() { + if time.Now().Sub(sft.dumpTime) >= time.Minute*5 { + //logger.Logger.Info("Session per five minuts: recvCntPerSec=", sft.recvCntPerSec, " sendCntPerSec=", sft.sendCntPerSec) + sft.dumpTime = time.Now() + } +} +func init() { + netlib.RegisteSessionFilterCreator(SessionFilterTraceName, func() netlib.SessionFilter { + tNow := time.Now() + return &SessionFilterTrace{dumpTime: tNow, recvTime: tNow, sendTime: tNow} + }) +} diff --git a/core/builtin/gen_go.bat b/core/builtin/gen_go.bat new file mode 100644 index 0000000..d996024 --- /dev/null +++ b/core/builtin/gen_go.bat @@ -0,0 +1,13 @@ +@echo off +set work_path=%cd% +set proto_path=%cd%\protocol +set protoc3=%cd%\..\..\bin\protoc-3.5.1-win32\bin\protoc.exe +set protoc-gen-go-plugin-path="%cd%\..\..\bin\protoc-gen-go.exe" + +cd %proto_path% + for %%b in (,*.proto) do ( + echo %%b + %protoc3% --plugin=protoc-gen-go=%protoc-gen-go-plugin-path% --go_out=. %%b + ) + cd .. +pause \ No newline at end of file diff --git a/core/builtin/protocol/corepacketid.pb.go b/core/builtin/protocol/corepacketid.pb.go new file mode 100644 index 0000000..d0a113f --- /dev/null +++ b/core/builtin/protocol/corepacketid.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: corepacketid.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CoreBuiltinPacketID int32 + +const ( + CoreBuiltinPacketID_PACKET_COREBUILTIN_UNKNOW CoreBuiltinPacketID = 0 + CoreBuiltinPacketID_PACKET_SS_TX_START CoreBuiltinPacketID = -1000 + CoreBuiltinPacketID_PACKET_SS_TX_CMD CoreBuiltinPacketID = -1001 + CoreBuiltinPacketID_PACKET_SS_TX_RESULT CoreBuiltinPacketID = -1002 + CoreBuiltinPacketID_PACKET_SS_SLICES CoreBuiltinPacketID = -1003 + CoreBuiltinPacketID_PACKET_SS_AUTH CoreBuiltinPacketID = -1004 + CoreBuiltinPacketID_PACKET_SS_KEEPALIVE CoreBuiltinPacketID = -1005 + CoreBuiltinPacketID_PACKET_SS_AUTH_ACK CoreBuiltinPacketID = -1006 + CoreBuiltinPacketID_PACKET_SS_RPC_REQ CoreBuiltinPacketID = -1007 + CoreBuiltinPacketID_PACKET_SS_RPC_RESP CoreBuiltinPacketID = -1008 +) + +// Enum value maps for CoreBuiltinPacketID. +var ( + CoreBuiltinPacketID_name = map[int32]string{ + 0: "PACKET_COREBUILTIN_UNKNOW", + -1000: "PACKET_SS_TX_START", + -1001: "PACKET_SS_TX_CMD", + -1002: "PACKET_SS_TX_RESULT", + -1003: "PACKET_SS_SLICES", + -1004: "PACKET_SS_AUTH", + -1005: "PACKET_SS_KEEPALIVE", + -1006: "PACKET_SS_AUTH_ACK", + -1007: "PACKET_SS_RPC_REQ", + -1008: "PACKET_SS_RPC_RESP", + } + CoreBuiltinPacketID_value = map[string]int32{ + "PACKET_COREBUILTIN_UNKNOW": 0, + "PACKET_SS_TX_START": -1000, + "PACKET_SS_TX_CMD": -1001, + "PACKET_SS_TX_RESULT": -1002, + "PACKET_SS_SLICES": -1003, + "PACKET_SS_AUTH": -1004, + "PACKET_SS_KEEPALIVE": -1005, + "PACKET_SS_AUTH_ACK": -1006, + "PACKET_SS_RPC_REQ": -1007, + "PACKET_SS_RPC_RESP": -1008, + } +) + +func (x CoreBuiltinPacketID) Enum() *CoreBuiltinPacketID { + p := new(CoreBuiltinPacketID) + *p = x + return p +} + +func (x CoreBuiltinPacketID) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CoreBuiltinPacketID) Descriptor() 
protoreflect.EnumDescriptor { + return file_corepacketid_proto_enumTypes[0].Descriptor() +} + +func (CoreBuiltinPacketID) Type() protoreflect.EnumType { + return &file_corepacketid_proto_enumTypes[0] +} + +func (x CoreBuiltinPacketID) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CoreBuiltinPacketID.Descriptor instead. +func (CoreBuiltinPacketID) EnumDescriptor() ([]byte, []int) { + return file_corepacketid_proto_rawDescGZIP(), []int{0} +} + +var File_corepacketid_proto protoreflect.FileDescriptor + +var file_corepacketid_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x6f, 0x72, 0x65, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x69, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2a, 0xd6, + 0x02, 0x0a, 0x13, 0x43, 0x6f, 0x72, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x74, 0x69, 0x6e, 0x50, 0x61, + 0x63, 0x6b, 0x65, 0x74, 0x49, 0x44, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x43, 0x4f, 0x52, 0x45, 0x42, 0x55, 0x49, 0x4c, 0x54, 0x49, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x12, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, + 0x53, 0x53, 0x5f, 0x54, 0x58, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x10, 0x98, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x1d, 0x0a, 0x10, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x53, 0x53, 0x5f, 0x54, 0x58, 0x5f, 0x43, 0x4d, 0x44, 0x10, 0x97, 0xf8, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x20, 0x0a, 0x13, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, + 0x53, 0x53, 0x5f, 0x54, 0x58, 0x5f, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x10, 0x96, 0xf8, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x1d, 0x0a, 0x10, 0x50, 0x41, 0x43, 0x4b, 0x45, + 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x53, 0x4c, 0x49, 0x43, 0x45, 0x53, 0x10, 0x95, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x1b, 0x0a, 0x0e, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x53, 0x53, 0x5f, 
0x41, 0x55, 0x54, 0x48, 0x10, 0x94, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x01, 0x12, 0x20, 0x0a, 0x13, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, + 0x5f, 0x4b, 0x45, 0x45, 0x50, 0x41, 0x4c, 0x49, 0x56, 0x45, 0x10, 0x93, 0xf8, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x1f, 0x0a, 0x12, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, + 0x53, 0x53, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x41, 0x43, 0x4b, 0x10, 0x92, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x1e, 0x0a, 0x11, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x53, 0x53, 0x5f, 0x52, 0x50, 0x43, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x91, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x1f, 0x0a, 0x12, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x53, 0x53, 0x5f, 0x52, 0x50, 0x43, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x10, 0x90, 0xf8, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_corepacketid_proto_rawDescOnce sync.Once + file_corepacketid_proto_rawDescData = file_corepacketid_proto_rawDesc +) + +func file_corepacketid_proto_rawDescGZIP() []byte { + file_corepacketid_proto_rawDescOnce.Do(func() { + file_corepacketid_proto_rawDescData = protoimpl.X.CompressGZIP(file_corepacketid_proto_rawDescData) + }) + return file_corepacketid_proto_rawDescData +} + +var file_corepacketid_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_corepacketid_proto_goTypes = []interface{}{ + (CoreBuiltinPacketID)(0), // 0: protocol.CoreBuiltinPacketID +} +var file_corepacketid_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_corepacketid_proto_init() } +func 
file_corepacketid_proto_init() { + if File_corepacketid_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_corepacketid_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_corepacketid_proto_goTypes, + DependencyIndexes: file_corepacketid_proto_depIdxs, + EnumInfos: file_corepacketid_proto_enumTypes, + }.Build() + File_corepacketid_proto = out.File + file_corepacketid_proto_rawDesc = nil + file_corepacketid_proto_goTypes = nil + file_corepacketid_proto_depIdxs = nil +} diff --git a/core/builtin/protocol/corepacketid.proto b/core/builtin/protocol/corepacketid.proto new file mode 100644 index 0000000..d87041a --- /dev/null +++ b/core/builtin/protocol/corepacketid.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +enum CoreBuiltinPacketID { + PACKET_COREBUILTIN_UNKNOW = 0; + PACKET_SS_TX_START = -1000; + PACKET_SS_TX_CMD = -1001; + PACKET_SS_TX_RESULT = -1002; + PACKET_SS_SLICES = -1003; + PACKET_SS_AUTH = -1004; + PACKET_SS_KEEPALIVE = -1005; + PACKET_SS_AUTH_ACK = -1006; + PACKET_SS_RPC_REQ = -1007; + PACKET_SS_RPC_RESP = -1008; +} \ No newline at end of file diff --git a/core/builtin/protocol/goserver_rpc.pb.go b/core/builtin/protocol/goserver_rpc.pb.go new file mode 100644 index 0000000..9b75ad5 --- /dev/null +++ b/core/builtin/protocol/goserver_rpc.pb.go @@ -0,0 +1,253 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: goserver_rpc.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RpcRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceMethod string `protobuf:"bytes,1,opt,name=ServiceMethod,proto3" json:"ServiceMethod,omitempty"` + Seq uint64 `protobuf:"varint,2,opt,name=Seq,proto3" json:"Seq,omitempty"` + Args []byte `protobuf:"bytes,3,opt,name=Args,proto3" json:"Args,omitempty"` +} + +func (x *RpcRequest) Reset() { + *x = RpcRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_goserver_rpc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcRequest) ProtoMessage() {} + +func (x *RpcRequest) ProtoReflect() protoreflect.Message { + mi := &file_goserver_rpc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcRequest.ProtoReflect.Descriptor instead. 
+func (*RpcRequest) Descriptor() ([]byte, []int) { + return file_goserver_rpc_proto_rawDescGZIP(), []int{0} +} + +func (x *RpcRequest) GetServiceMethod() string { + if x != nil { + return x.ServiceMethod + } + return "" +} + +func (x *RpcRequest) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +func (x *RpcRequest) GetArgs() []byte { + if x != nil { + return x.Args + } + return nil +} + +type RpcResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceMethod string `protobuf:"bytes,1,opt,name=ServiceMethod,proto3" json:"ServiceMethod,omitempty"` + Seq uint64 `protobuf:"varint,2,opt,name=Seq,proto3" json:"Seq,omitempty"` + Error string `protobuf:"bytes,3,opt,name=Error,proto3" json:"Error,omitempty"` + Reply []byte `protobuf:"bytes,4,opt,name=Reply,proto3" json:"Reply,omitempty"` +} + +func (x *RpcResponse) Reset() { + *x = RpcResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_goserver_rpc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcResponse) ProtoMessage() {} + +func (x *RpcResponse) ProtoReflect() protoreflect.Message { + mi := &file_goserver_rpc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcResponse.ProtoReflect.Descriptor instead. 
+func (*RpcResponse) Descriptor() ([]byte, []int) { + return file_goserver_rpc_proto_rawDescGZIP(), []int{1} +} + +func (x *RpcResponse) GetServiceMethod() string { + if x != nil { + return x.ServiceMethod + } + return "" +} + +func (x *RpcResponse) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +func (x *RpcResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *RpcResponse) GetReply() []byte { + if x != nil { + return x.Reply + } + return nil +} + +var File_goserver_rpc_proto protoreflect.FileDescriptor + +var file_goserver_rpc_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x67, 0x6f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x58, + 0x0a, 0x0a, 0x52, 0x70, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x65, 0x71, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x03, 0x53, 0x65, 0x71, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x41, 0x72, 0x67, 0x73, 0x22, 0x71, 0x0a, 0x0b, 0x52, 0x70, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x53, 0x65, 0x71, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x53, 0x65, 0x71, 0x12, + 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x04, + 0x20, 
0x01, 0x28, 0x0c, 0x52, 0x05, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, + 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_goserver_rpc_proto_rawDescOnce sync.Once + file_goserver_rpc_proto_rawDescData = file_goserver_rpc_proto_rawDesc +) + +func file_goserver_rpc_proto_rawDescGZIP() []byte { + file_goserver_rpc_proto_rawDescOnce.Do(func() { + file_goserver_rpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_goserver_rpc_proto_rawDescData) + }) + return file_goserver_rpc_proto_rawDescData +} + +var file_goserver_rpc_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_goserver_rpc_proto_goTypes = []interface{}{ + (*RpcRequest)(nil), // 0: protocol.RpcRequest + (*RpcResponse)(nil), // 1: protocol.RpcResponse +} +var file_goserver_rpc_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_goserver_rpc_proto_init() } +func file_goserver_rpc_proto_init() { + if File_goserver_rpc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_goserver_rpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_goserver_rpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_goserver_rpc_proto_rawDesc, + 
NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_goserver_rpc_proto_goTypes, + DependencyIndexes: file_goserver_rpc_proto_depIdxs, + MessageInfos: file_goserver_rpc_proto_msgTypes, + }.Build() + File_goserver_rpc_proto = out.File + file_goserver_rpc_proto_rawDesc = nil + file_goserver_rpc_proto_goTypes = nil + file_goserver_rpc_proto_depIdxs = nil +} diff --git a/core/builtin/protocol/goserver_rpc.proto b/core/builtin/protocol/goserver_rpc.proto new file mode 100644 index 0000000..21ec983 --- /dev/null +++ b/core/builtin/protocol/goserver_rpc.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message RpcRequest { + string ServiceMethod = 1; + uint64 Seq = 2; + bytes Args = 3; +} + +message RpcResponse { + string ServiceMethod = 1; + uint64 Seq = 2; + string Error = 3; + bytes Reply = 4; +} diff --git a/core/builtin/protocol/keepalive.pb.go b/core/builtin/protocol/keepalive.pb.go new file mode 100644 index 0000000..ae97b05 --- /dev/null +++ b/core/builtin/protocol/keepalive.pb.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: keepalive.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SSPacketKeepAlive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Flag int32 `protobuf:"varint,1,opt,name=Flag,proto3" json:"Flag,omitempty"` +} + +func (x *SSPacketKeepAlive) Reset() { + *x = SSPacketKeepAlive{} + if protoimpl.UnsafeEnabled { + mi := &file_keepalive_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSPacketKeepAlive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSPacketKeepAlive) ProtoMessage() {} + +func (x *SSPacketKeepAlive) ProtoReflect() protoreflect.Message { + mi := &file_keepalive_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSPacketKeepAlive.ProtoReflect.Descriptor instead. 
+func (*SSPacketKeepAlive) Descriptor() ([]byte, []int) { + return file_keepalive_proto_rawDescGZIP(), []int{0} +} + +func (x *SSPacketKeepAlive) GetFlag() int32 { + if x != nil { + return x.Flag + } + return 0 +} + +var File_keepalive_proto protoreflect.FileDescriptor + +var file_keepalive_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x27, 0x0a, 0x11, 0x53, + 0x53, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x46, 0x6c, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, + 0x46, 0x6c, 0x61, 0x67, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_keepalive_proto_rawDescOnce sync.Once + file_keepalive_proto_rawDescData = file_keepalive_proto_rawDesc +) + +func file_keepalive_proto_rawDescGZIP() []byte { + file_keepalive_proto_rawDescOnce.Do(func() { + file_keepalive_proto_rawDescData = protoimpl.X.CompressGZIP(file_keepalive_proto_rawDescData) + }) + return file_keepalive_proto_rawDescData +} + +var file_keepalive_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_keepalive_proto_goTypes = []interface{}{ + (*SSPacketKeepAlive)(nil), // 0: protocol.SSPacketKeepAlive +} +var file_keepalive_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_keepalive_proto_init() } +func file_keepalive_proto_init() { + if File_keepalive_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_keepalive_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*SSPacketKeepAlive); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_keepalive_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_keepalive_proto_goTypes, + DependencyIndexes: file_keepalive_proto_depIdxs, + MessageInfos: file_keepalive_proto_msgTypes, + }.Build() + File_keepalive_proto = out.File + file_keepalive_proto_rawDesc = nil + file_keepalive_proto_goTypes = nil + file_keepalive_proto_depIdxs = nil +} diff --git a/core/builtin/protocol/keepalive.proto b/core/builtin/protocol/keepalive.proto new file mode 100644 index 0000000..e46eeab --- /dev/null +++ b/core/builtin/protocol/keepalive.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSPacketKeepAlive { + int32 Flag = 1; +} \ No newline at end of file diff --git a/core/builtin/protocol/sessionauth.pb.go b/core/builtin/protocol/sessionauth.pb.go new file mode 100644 index 0000000..6a20663 --- /dev/null +++ b/core/builtin/protocol/sessionauth.pb.go @@ -0,0 +1,214 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: sessionauth.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SSPacketAuth struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AuthKey string `protobuf:"bytes,1,opt,name=AuthKey,proto3" json:"AuthKey,omitempty"` + Timestamp int64 `protobuf:"varint,2,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` +} + +func (x *SSPacketAuth) Reset() { + *x = SSPacketAuth{} + if protoimpl.UnsafeEnabled { + mi := &file_sessionauth_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSPacketAuth) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSPacketAuth) ProtoMessage() {} + +func (x *SSPacketAuth) ProtoReflect() protoreflect.Message { + mi := &file_sessionauth_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSPacketAuth.ProtoReflect.Descriptor instead. 
+func (*SSPacketAuth) Descriptor() ([]byte, []int) { + return file_sessionauth_proto_rawDescGZIP(), []int{0} +} + +func (x *SSPacketAuth) GetAuthKey() string { + if x != nil { + return x.AuthKey + } + return "" +} + +func (x *SSPacketAuth) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +type SSPacketAuthAck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msg string `protobuf:"bytes,1,opt,name=Msg,proto3" json:"Msg,omitempty"` +} + +func (x *SSPacketAuthAck) Reset() { + *x = SSPacketAuthAck{} + if protoimpl.UnsafeEnabled { + mi := &file_sessionauth_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSPacketAuthAck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSPacketAuthAck) ProtoMessage() {} + +func (x *SSPacketAuthAck) ProtoReflect() protoreflect.Message { + mi := &file_sessionauth_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSPacketAuthAck.ProtoReflect.Descriptor instead. 
+func (*SSPacketAuthAck) Descriptor() ([]byte, []int) { + return file_sessionauth_proto_rawDescGZIP(), []int{1} +} + +func (x *SSPacketAuthAck) GetMsg() string { + if x != nil { + return x.Msg + } + return "" +} + +var File_sessionauth_proto protoreflect.FileDescriptor + +var file_sessionauth_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x46, 0x0a, + 0x0c, 0x53, 0x53, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x12, 0x18, 0x0a, + 0x07, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x23, 0x0a, 0x0f, 0x53, 0x53, 0x50, 0x61, 0x63, 0x6b, 0x65, + 0x74, 0x41, 0x75, 0x74, 0x68, 0x41, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4d, 0x73, 0x67, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sessionauth_proto_rawDescOnce sync.Once + file_sessionauth_proto_rawDescData = file_sessionauth_proto_rawDesc +) + +func file_sessionauth_proto_rawDescGZIP() []byte { + file_sessionauth_proto_rawDescOnce.Do(func() { + file_sessionauth_proto_rawDescData = protoimpl.X.CompressGZIP(file_sessionauth_proto_rawDescData) + }) + return file_sessionauth_proto_rawDescData +} + +var file_sessionauth_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sessionauth_proto_goTypes = []interface{}{ + (*SSPacketAuth)(nil), // 0: protocol.SSPacketAuth + (*SSPacketAuthAck)(nil), // 1: protocol.SSPacketAuthAck +} +var file_sessionauth_proto_depIdxs = []int32{ + 0, // [0:0] is the 
sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sessionauth_proto_init() } +func file_sessionauth_proto_init() { + if File_sessionauth_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sessionauth_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSPacketAuth); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sessionauth_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSPacketAuthAck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sessionauth_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sessionauth_proto_goTypes, + DependencyIndexes: file_sessionauth_proto_depIdxs, + MessageInfos: file_sessionauth_proto_msgTypes, + }.Build() + File_sessionauth_proto = out.File + file_sessionauth_proto_rawDesc = nil + file_sessionauth_proto_goTypes = nil + file_sessionauth_proto_depIdxs = nil +} diff --git a/core/builtin/protocol/sessionauth.proto b/core/builtin/protocol/sessionauth.proto new file mode 100644 index 0000000..206daf8 --- /dev/null +++ b/core/builtin/protocol/sessionauth.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSPacketAuth { + string AuthKey = 1; + int64 Timestamp = 2; +} + +message SSPacketAuthAck { + string Msg = 1; +} \ No newline at end of file diff --git a/core/builtin/protocol/slices.pb.go 
b/core/builtin/protocol/slices.pb.go new file mode 100644 index 0000000..738dab2 --- /dev/null +++ b/core/builtin/protocol/slices.pb.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: slices.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SSPacketSlices struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SeqNo int32 `protobuf:"varint,1,opt,name=SeqNo,proto3" json:"SeqNo,omitempty"` + TotalSize int32 `protobuf:"varint,2,opt,name=TotalSize,proto3" json:"TotalSize,omitempty"` + Offset int32 `protobuf:"varint,3,opt,name=Offset,proto3" json:"Offset,omitempty"` + PacketData []byte `protobuf:"bytes,4,opt,name=PacketData,proto3" json:"PacketData,omitempty"` +} + +func (x *SSPacketSlices) Reset() { + *x = SSPacketSlices{} + if protoimpl.UnsafeEnabled { + mi := &file_slices_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSPacketSlices) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSPacketSlices) ProtoMessage() {} + +func (x *SSPacketSlices) ProtoReflect() protoreflect.Message { + mi := &file_slices_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSPacketSlices.ProtoReflect.Descriptor instead. 
+func (*SSPacketSlices) Descriptor() ([]byte, []int) { + return file_slices_proto_rawDescGZIP(), []int{0} +} + +func (x *SSPacketSlices) GetSeqNo() int32 { + if x != nil { + return x.SeqNo + } + return 0 +} + +func (x *SSPacketSlices) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *SSPacketSlices) GetOffset() int32 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *SSPacketSlices) GetPacketData() []byte { + if x != nil { + return x.PacketData + } + return nil +} + +var File_slices_proto protoreflect.FileDescriptor + +var file_slices_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x7c, 0x0a, 0x0e, 0x53, 0x53, 0x50, 0x61, + 0x63, 0x6b, 0x65, 0x74, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x65, + 0x71, 0x4e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x53, 0x65, 0x71, 0x4e, 0x6f, + 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x50, 0x61, 0x63, 0x6b, + 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_slices_proto_rawDescOnce sync.Once + file_slices_proto_rawDescData = file_slices_proto_rawDesc +) + +func file_slices_proto_rawDescGZIP() []byte { + file_slices_proto_rawDescOnce.Do(func() { + file_slices_proto_rawDescData = protoimpl.X.CompressGZIP(file_slices_proto_rawDescData) + }) + return 
file_slices_proto_rawDescData +} + +var file_slices_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_slices_proto_goTypes = []interface{}{ + (*SSPacketSlices)(nil), // 0: protocol.SSPacketSlices +} +var file_slices_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_slices_proto_init() } +func file_slices_proto_init() { + if File_slices_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_slices_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSPacketSlices); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_slices_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_slices_proto_goTypes, + DependencyIndexes: file_slices_proto_depIdxs, + MessageInfos: file_slices_proto_msgTypes, + }.Build() + File_slices_proto = out.File + file_slices_proto_rawDesc = nil + file_slices_proto_goTypes = nil + file_slices_proto_depIdxs = nil +} diff --git a/core/builtin/protocol/slices.proto b/core/builtin/protocol/slices.proto new file mode 100644 index 0000000..9a58573 --- /dev/null +++ b/core/builtin/protocol/slices.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSPacketSlices { + int32 SeqNo = 1; + int32 TotalSize = 2; + int32 Offset = 3; + bytes PacketData = 4; +} \ No newline at end of file diff --git a/core/builtin/protocol/transact.pb.go b/core/builtin/protocol/transact.pb.go new file mode 100644 index 
0000000..e2a8eaf --- /dev/null +++ b/core/builtin/protocol/transact.pb.go @@ -0,0 +1,482 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: transact.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TransactStart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MyTNP *TransactParam `protobuf:"bytes,1,opt,name=MyTNP,proto3" json:"MyTNP,omitempty"` + ParenTNP *TransactParam `protobuf:"bytes,2,opt,name=ParenTNP,proto3" json:"ParenTNP,omitempty"` + CustomData []byte `protobuf:"bytes,3,opt,name=CustomData,proto3" json:"CustomData,omitempty"` +} + +func (x *TransactStart) Reset() { + *x = TransactStart{} + if protoimpl.UnsafeEnabled { + mi := &file_transact_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactStart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactStart) ProtoMessage() {} + +func (x *TransactStart) ProtoReflect() protoreflect.Message { + mi := &file_transact_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactStart.ProtoReflect.Descriptor instead. 
+func (*TransactStart) Descriptor() ([]byte, []int) { + return file_transact_proto_rawDescGZIP(), []int{0} +} + +func (x *TransactStart) GetMyTNP() *TransactParam { + if x != nil { + return x.MyTNP + } + return nil +} + +func (x *TransactStart) GetParenTNP() *TransactParam { + if x != nil { + return x.ParenTNP + } + return nil +} + +func (x *TransactStart) GetCustomData() []byte { + if x != nil { + return x.CustomData + } + return nil +} + +type TransactCtrlCmd struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TId int64 `protobuf:"varint,1,opt,name=TId,proto3" json:"TId,omitempty"` + Cmd int32 `protobuf:"varint,2,opt,name=Cmd,proto3" json:"Cmd,omitempty"` +} + +func (x *TransactCtrlCmd) Reset() { + *x = TransactCtrlCmd{} + if protoimpl.UnsafeEnabled { + mi := &file_transact_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactCtrlCmd) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactCtrlCmd) ProtoMessage() {} + +func (x *TransactCtrlCmd) ProtoReflect() protoreflect.Message { + mi := &file_transact_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactCtrlCmd.ProtoReflect.Descriptor instead. 
+func (*TransactCtrlCmd) Descriptor() ([]byte, []int) { + return file_transact_proto_rawDescGZIP(), []int{1} +} + +func (x *TransactCtrlCmd) GetTId() int64 { + if x != nil { + return x.TId + } + return 0 +} + +func (x *TransactCtrlCmd) GetCmd() int32 { + if x != nil { + return x.Cmd + } + return 0 +} + +type TransactResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MyTId int64 `protobuf:"varint,1,opt,name=MyTId,proto3" json:"MyTId,omitempty"` + ChildTId int64 `protobuf:"varint,2,opt,name=ChildTId,proto3" json:"ChildTId,omitempty"` + RetCode int32 `protobuf:"varint,3,opt,name=RetCode,proto3" json:"RetCode,omitempty"` + CustomData []byte `protobuf:"bytes,4,opt,name=CustomData,proto3" json:"CustomData,omitempty"` +} + +func (x *TransactResult) Reset() { + *x = TransactResult{} + if protoimpl.UnsafeEnabled { + mi := &file_transact_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactResult) ProtoMessage() {} + +func (x *TransactResult) ProtoReflect() protoreflect.Message { + mi := &file_transact_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactResult.ProtoReflect.Descriptor instead. 
+func (*TransactResult) Descriptor() ([]byte, []int) { + return file_transact_proto_rawDescGZIP(), []int{2} +} + +func (x *TransactResult) GetMyTId() int64 { + if x != nil { + return x.MyTId + } + return 0 +} + +func (x *TransactResult) GetChildTId() int64 { + if x != nil { + return x.ChildTId + } + return 0 +} + +func (x *TransactResult) GetRetCode() int32 { + if x != nil { + return x.RetCode + } + return 0 +} + +func (x *TransactResult) GetCustomData() []byte { + if x != nil { + return x.CustomData + } + return nil +} + +type TransactParam struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransNodeID int64 `protobuf:"varint,1,opt,name=TransNodeID,proto3" json:"TransNodeID,omitempty"` + TransType int32 `protobuf:"varint,2,opt,name=TransType,proto3" json:"TransType,omitempty"` + OwnerType int32 `protobuf:"varint,3,opt,name=OwnerType,proto3" json:"OwnerType,omitempty"` + OwnerID int32 `protobuf:"varint,4,opt,name=OwnerID,proto3" json:"OwnerID,omitempty"` + SkeletonID int32 `protobuf:"varint,5,opt,name=SkeletonID,proto3" json:"SkeletonID,omitempty"` + LevelNo int32 `protobuf:"varint,6,opt,name=LevelNo,proto3" json:"LevelNo,omitempty"` + AreaID int32 `protobuf:"varint,7,opt,name=AreaID,proto3" json:"AreaID,omitempty"` + TimeOut int64 `protobuf:"varint,8,opt,name=TimeOut,proto3" json:"TimeOut,omitempty"` + TransCommitType int32 `protobuf:"varint,9,opt,name=TransCommitType,proto3" json:"TransCommitType,omitempty"` + ExpiresTs int64 `protobuf:"varint,10,opt,name=ExpiresTs,proto3" json:"ExpiresTs,omitempty"` +} + +func (x *TransactParam) Reset() { + *x = TransactParam{} + if protoimpl.UnsafeEnabled { + mi := &file_transact_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactParam) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactParam) ProtoMessage() {} + +func (x *TransactParam) ProtoReflect() 
protoreflect.Message { + mi := &file_transact_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactParam.ProtoReflect.Descriptor instead. +func (*TransactParam) Descriptor() ([]byte, []int) { + return file_transact_proto_rawDescGZIP(), []int{3} +} + +func (x *TransactParam) GetTransNodeID() int64 { + if x != nil { + return x.TransNodeID + } + return 0 +} + +func (x *TransactParam) GetTransType() int32 { + if x != nil { + return x.TransType + } + return 0 +} + +func (x *TransactParam) GetOwnerType() int32 { + if x != nil { + return x.OwnerType + } + return 0 +} + +func (x *TransactParam) GetOwnerID() int32 { + if x != nil { + return x.OwnerID + } + return 0 +} + +func (x *TransactParam) GetSkeletonID() int32 { + if x != nil { + return x.SkeletonID + } + return 0 +} + +func (x *TransactParam) GetLevelNo() int32 { + if x != nil { + return x.LevelNo + } + return 0 +} + +func (x *TransactParam) GetAreaID() int32 { + if x != nil { + return x.AreaID + } + return 0 +} + +func (x *TransactParam) GetTimeOut() int64 { + if x != nil { + return x.TimeOut + } + return 0 +} + +func (x *TransactParam) GetTransCommitType() int32 { + if x != nil { + return x.TransCommitType + } + return 0 +} + +func (x *TransactParam) GetExpiresTs() int64 { + if x != nil { + return x.ExpiresTs + } + return 0 +} + +var File_transact_proto protoreflect.FileDescriptor + +var file_transact_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x93, 0x01, 0x0a, 0x0d, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2d, 0x0a, 0x05, + 0x4d, 0x79, 0x54, 0x4e, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x52, 0x05, 0x4d, 0x79, 0x54, 0x4e, 0x50, 0x12, 0x33, 0x0a, 0x08, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x54, 0x4e, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x52, 0x08, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x54, 0x4e, 0x50, + 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x44, 0x61, 0x74, 0x61, + 0x22, 0x35, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x43, 0x74, 0x72, 0x6c, + 0x43, 0x6d, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x03, 0x54, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x6d, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x43, 0x6d, 0x64, 0x22, 0x7c, 0x0a, 0x0e, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x4d, 0x79, 0x54, + 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x4d, 0x79, 0x54, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x52, + 0x65, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x52, 0x65, + 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x44, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x44, 0x61, 0x74, 0x61, 0x22, 0xbb, 0x02, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x4f, 0x77, 0x6e, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x4f, 0x77, 0x6e, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, + 0x1e, 0x0a, 0x0a, 0x53, 0x6b, 0x65, 0x6c, 0x65, 0x74, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x53, 0x6b, 0x65, 0x6c, 0x65, 0x74, 0x6f, 0x6e, 0x49, 0x44, 0x12, + 0x18, 0x0a, 0x07, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4e, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x07, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4e, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x41, 0x72, 0x65, + 0x61, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x41, 0x72, 0x65, 0x61, 0x49, + 0x44, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x54, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x73, 0x54, 0x73, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_transact_proto_rawDescOnce sync.Once + file_transact_proto_rawDescData = file_transact_proto_rawDesc 
+) + +func file_transact_proto_rawDescGZIP() []byte { + file_transact_proto_rawDescOnce.Do(func() { + file_transact_proto_rawDescData = protoimpl.X.CompressGZIP(file_transact_proto_rawDescData) + }) + return file_transact_proto_rawDescData +} + +var file_transact_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_transact_proto_goTypes = []interface{}{ + (*TransactStart)(nil), // 0: protocol.TransactStart + (*TransactCtrlCmd)(nil), // 1: protocol.TransactCtrlCmd + (*TransactResult)(nil), // 2: protocol.TransactResult + (*TransactParam)(nil), // 3: protocol.TransactParam +} +var file_transact_proto_depIdxs = []int32{ + 3, // 0: protocol.TransactStart.MyTNP:type_name -> protocol.TransactParam + 3, // 1: protocol.TransactStart.ParenTNP:type_name -> protocol.TransactParam + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_transact_proto_init() } +func file_transact_proto_init() { + if File_transact_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_transact_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactStart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_transact_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactCtrlCmd); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_transact_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_transact_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactParam); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_transact_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_transact_proto_goTypes, + DependencyIndexes: file_transact_proto_depIdxs, + MessageInfos: file_transact_proto_msgTypes, + }.Build() + File_transact_proto = out.File + file_transact_proto_rawDesc = nil + file_transact_proto_goTypes = nil + file_transact_proto_depIdxs = nil +} diff --git a/core/builtin/protocol/transact.proto b/core/builtin/protocol/transact.proto new file mode 100644 index 0000000..ef603aa --- /dev/null +++ b/core/builtin/protocol/transact.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message TransactStart { + TransactParam MyTNP = 1; + TransactParam ParenTNP = 2; + bytes CustomData = 3; +} + +message TransactCtrlCmd { + int64 TId = 1; + int32 Cmd = 2; +} + +message TransactResult { + int64 MyTId = 1; + int64 ChildTId = 2; + int32 RetCode = 3; + bytes CustomData = 4; +} + +message TransactParam { + int64 TransNodeID = 1; + int32 TransType = 2; + int32 OwnerType = 3; + int32 OwnerID = 4; + int32 SkeletonID = 5; + int32 LevelNo = 6; + int32 AreaID = 7; + int64 TimeOut = 8; + int32 TransCommitType = 9; + int64 ExpiresTs = 10; +} \ No newline at end of file diff --git a/core/cmdline/cmdline.go b/core/cmdline/cmdline.go new file mode 100644 index 0000000..47d2bde --- /dev/null +++ b/core/cmdline/cmdline.go @@ -0,0 +1,121 @@ +package cmdline + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + "time" + + "mongo.games.com/goserver/core/module" +) + +var cmdpool = 
make(map[string]cmdExecuter) + +type cmdGoroutineWapper struct { +} + +type CmdArg struct { + Flag string + SimplifyFlag string + Required bool +} + +type CmdArgParser struct { + cmeKV map[string]string +} + +type cmdExecuter interface { + Execute(args []string) + ShowUsage() +} + +func NewCmdArgParser(args []string) *CmdArgParser { + parser := &CmdArgParser{ + cmeKV: make(map[string]string), + } + for _, arg := range args { + kv := strings.Split(arg, "=") + if len(kv) == 2 { + k := kv[0] + v := kv[1] + parser.cmeKV[k] = v + } + } + return parser +} + +func RegisteCmd(cmdName string, executer cmdExecuter) { + cmdpool[strings.ToLower(cmdName)] = executer +} + +func (cw *cmdGoroutineWapper) Start() { + if Config.SupportCmdLine { + go func() { + var ( + reader = bufio.NewReader(os.Stdin) + cmdLine []byte + isPrefix bool + err error + ) + + for { + cmdLine, isPrefix, err = reader.ReadLine() + if err == nil && isPrefix == false { + params := strings.Split(string(cmdLine), " ") + if len(params) >= 1 { + cmdName := strings.ToLower(params[0]) + if cmdExecute, exist := cmdpool[cmdName]; exist { + PostCmd(module.AppModule.Object, cmdExecute, params[1:]) + } + } + } + time.Sleep(time.Second) + } + }() + } +} + +func (this *CmdArgParser) ExtraIntArg(arg *CmdArg, val *int) { + if v, exist := this.cmeKV[arg.SimplifyFlag]; !exist { + if v, exist := this.cmeKV[arg.Flag]; !exist && arg.Required { + fmt.Println(arg.Flag, "must be give") + return + } else { + *val, _ = strconv.Atoi(v) + } + } else { + *val, _ = strconv.Atoi(v) + } +} + +func (this *CmdArgParser) ExtraInt64Arg(arg *CmdArg, val *int64) { + if v, exist := this.cmeKV[arg.SimplifyFlag]; !exist { + if v, exist := this.cmeKV[arg.Flag]; !exist && arg.Required { + fmt.Println(arg.Flag, "must be give") + return + } else { + *val, _ = strconv.ParseInt(v, 10, 64) + } + } else { + *val, _ = strconv.ParseInt(v, 10, 64) + } +} + +func (this *CmdArgParser) ExtraStringArg(arg *CmdArg, val *string) { + if v, exist := 
this.cmeKV[arg.SimplifyFlag]; !exist { + if v, exist := this.cmeKV[arg.Flag]; !exist && arg.Required { + fmt.Println(arg.Flag, "must be give") + return + } else { + *val = v + } + } else { + *val = v + } +} + +func init() { + //module.RegistePreloadModule(&cmdGoroutineWapper{}, 0) +} diff --git a/core/cmdline/cmdline_exit.go b/core/cmdline/cmdline_exit.go new file mode 100644 index 0000000..0302143 --- /dev/null +++ b/core/cmdline/cmdline_exit.go @@ -0,0 +1,22 @@ +package cmdline + +import ( + "fmt" + + "mongo.games.com/goserver/core/module" +) + +type exitExecuter struct { +} + +func (this exitExecuter) Execute(args []string) { + module.Stop() +} + +func (this exitExecuter) ShowUsage() { + fmt.Println("usage: exit") +} + +func init() { + RegisteCmd("exit", &exitExecuter{}) +} diff --git a/core/cmdline/cmdline_help.go b/core/cmdline/cmdline_help.go new file mode 100644 index 0000000..14ef400 --- /dev/null +++ b/core/cmdline/cmdline_help.go @@ -0,0 +1,35 @@ +package cmdline + +import ( + "fmt" +) + +type helpExecuter struct { +} + +func (this helpExecuter) Execute(args []string) { + if len(args) > 0 { + if cmde, exist := cmdpool[args[0]]; exist { + cmde.ShowUsage() + } + } else { + this.ShowUsage() + fmt.Println("The commands are:") + for k, _ := range cmdpool { + if k != "help" { + fmt.Println("\t", k) + } + } + fmt.Println("Use \"help [command]\" for more information about a command.") + } +} + +func (this helpExecuter) ShowUsage() { + fmt.Println("Help is a help command like window or linux shell's command") + fmt.Println("Usage:") + fmt.Println("\t", "help command") +} + +func init() { + RegisteCmd("help", &helpExecuter{}) +} diff --git a/core/cmdline/command_cmdline.go b/core/cmdline/command_cmdline.go new file mode 100644 index 0000000..5ad771b --- /dev/null +++ b/core/cmdline/command_cmdline.go @@ -0,0 +1,20 @@ +package cmdline + +import ( + "mongo.games.com/goserver/core/basic" +) + +type cmdlineCommand struct { + exec cmdExecuter + args []string +} + +func 
(cmd *cmdlineCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + cmd.exec.Execute(cmd.args) + return nil +} + +func PostCmd(p *basic.Object, exec cmdExecuter, args []string) bool { + return p.SendCommand(&cmdlineCommand{exec: exec, args: args}, true) +} diff --git a/core/cmdline/config.go b/core/cmdline/config.go new file mode 100644 index 0000000..2b67dc1 --- /dev/null +++ b/core/cmdline/config.go @@ -0,0 +1,27 @@ +package cmdline + +import ( + "mongo.games.com/goserver/core" +) + +var Config = Configuration{} + +type Configuration struct { + SupportCmdLine bool +} + +func (c *Configuration) Name() string { + return "cmdline" +} + +func (c *Configuration) Init() error { + return nil +} + +func (c *Configuration) Close() error { + return nil +} + +func init() { + core.RegistePackage(&Config) +} diff --git a/core/config.go b/core/config.go new file mode 100644 index 0000000..4c7054a --- /dev/null +++ b/core/config.go @@ -0,0 +1,32 @@ +package core + +import ( + "runtime" +) + +var Config = Configuration{} + +type Configuration struct { + MaxProcs int + Debug bool +} + +func (c *Configuration) Name() string { + return "core" +} + +func (c *Configuration) Init() error { + if c.MaxProcs <= 0 { + c.MaxProcs = 1 + } + runtime.GOMAXPROCS(c.MaxProcs) + return nil +} + +func (c *Configuration) Close() error { + return nil +} + +func init() { + RegistePackage(&Config) +} diff --git a/core/container/doc.go b/core/container/doc.go new file mode 100644 index 0000000..92b9571 --- /dev/null +++ b/core/container/doc.go @@ -0,0 +1 @@ +package container diff --git a/core/container/queue/queue.go b/core/container/queue/queue.go new file mode 100644 index 0000000..4ef64b9 --- /dev/null +++ b/core/container/queue/queue.go @@ -0,0 +1,12 @@ +// queue +package queue + +import ( + "time" +) + +type Queue interface { + Len() int + Enqueue(interface{}, time.Duration) bool + Dequeue(time.Duration) (interface{}, bool) +} diff --git a/core/container/queue/queue_chan.go 
b/core/container/queue/queue_chan.go new file mode 100644 index 0000000..a2803df --- /dev/null +++ b/core/container/queue/queue_chan.go @@ -0,0 +1,51 @@ +// queue +package queue + +import "time" + +type queueC struct { + c chan interface{} +} + +func NewQueueC(backlog int) Queue { + return &queueC{ + c: make(chan interface{}, backlog), + } +} + +func (q *queueC) Enqueue(i interface{}, timeout time.Duration) bool { + if timeout > 0 { + timer := time.NewTimer(timeout) + select { + case q.c <- i: + case <-timer.C: + return false + } + } else { + q.c <- i + } + + return true +} + +func (q *queueC) Dequeue(timeout time.Duration) (i interface{}, ok bool) { + if timeout > 0 { + timer := time.NewTimer(timeout) + select { + case i, ok = <-q.c: + return i, ok + case <-timer.C: + return nil, false + } + } else { + select { + case i, ok = <-q.c: + return i, ok + } + } + return nil, false +} + +func (q *queueC) Len() int { + return len(q.c) +} diff --git a/core/container/queue/queue_sync.go b/core/container/queue/queue_sync.go new file mode 100644 index 0000000..92eae47 --- /dev/null +++ b/core/container/queue/queue_sync.go @@ -0,0 +1,50 @@ +package queue + +import ( + "container/list" + "sync" + "time" +) + +type queueS struct { + fifo *list.List + lock *sync.RWMutex +} + +func NewQueueS() Queue { + q := &queueS{ + fifo: list.New(), + lock: new(sync.RWMutex), + } + return q +} + +func (q *queueS) Enqueue(i interface{}, timeout time.Duration) bool { + q.lock.Lock() + q.fifo.PushBack(i) + q.lock.Unlock() + return true +} + +func (q *queueS) Dequeue(timeout time.Duration) (interface{}, bool) { + if q.fifo.Len() == 0 { + return nil, false + } + + q.lock.Lock() + e := q.fifo.Front() + if e != nil { + q.fifo.Remove(e) + q.lock.Unlock() + return e.Value, true + } + q.lock.Unlock() + return nil, false +} + +func (q *queueS) Len() int { + q.lock.RLock() + l := q.fifo.Len() + q.lock.RUnlock() + return l +} diff --git a/core/container/queue/queue_test.go 
b/core/container/queue/queue_test.go new file mode 100644 index 0000000..272818f --- /dev/null +++ b/core/container/queue/queue_test.go @@ -0,0 +1,85 @@ +// queue_test +package queue + +import ( + "testing" + "time" +) + +func TestSyncQueneEnqueue(t *testing.T) { + const CNT int = 10 + q := NewQueueS() + for i := 0; i < CNT; i++ { + q.Enqueue(i, 0) + } + if q.Len() != CNT { + t.Error("sync queue Enqueue error") + } +} + +func TestSyncQueneDequeue(t *testing.T) { + const CNT int = 10 + q := NewQueueS() + for i := 0; i < CNT; i++ { + q.Enqueue(i, 0) + } + + var ( + b bool = true + d interface{} + cnt int + ) + + for b { + d, b = q.Dequeue(0) + if b { + cnt++ + t.Log("Dequeue data:", d) + } + } + if cnt != CNT { + t.Error("sync queue Dequeue error") + } +} + +func BenchmarkSyncQueneEnqueue(b *testing.B) { + q := NewQueueS() + b.StartTimer() + for i := 0; i < b.N; i++ { + q.Enqueue(i, 0) + } + b.StopTimer() +} + +func BenchmarkSyncQueneDequeue(b *testing.B) { + q := NewQueueS() + for i := 0; i < b.N; i++ { + q.Enqueue(i, 0) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + q.Dequeue(0) + } + b.StopTimer() +} + +func BenchmarkChanQueneEnqueue(b *testing.B) { + q := NewQueueC(b.N) + b.StartTimer() + for i := 0; i < b.N; i++ { + q.Enqueue(i, time.Millisecond) + } + b.StopTimer() +} + +func BenchmarkChanQueneDequeue(b *testing.B) { + q := NewQueueC(b.N) + for i := 0; i < b.N; i++ { + q.Enqueue(i, 0) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + q.Dequeue(0) + } + b.StopTimer() +} diff --git a/core/container/recycler/recycler.go b/core/container/recycler/recycler.go new file mode 100644 index 0000000..022624c --- /dev/null +++ b/core/container/recycler/recycler.go @@ -0,0 +1,87 @@ +// recycler +package recycler + +import ( + "container/list" + "time" +) + +var RecyclerBacklogDefault int = 5 + +type element struct { + when time.Time + data interface{} +} + +type Recycler struct { + get chan interface{} + give chan interface{} + ocf func() interface{} + que *list.List 
+ timeout *time.Timer + makecnt int + name string + running bool +} + +func NewRecycler(backlog int, ocf func() interface{}, name string) *Recycler { + r := &Recycler{ + get: make(chan interface{}, backlog), + give: make(chan interface{}, backlog), + ocf: ocf, + que: list.New(), + timeout: time.NewTimer(time.Minute), + name: name, + running: true, + } + + go r.run() + return r +} + +func (this *Recycler) run() { + RecyclerMgr.registe(this) + defer RecyclerMgr.unregiste(this) + + for this.running { + if this.que.Len() == 0 { + this.que.PushFront(element{when: time.Now(), data: this.ocf()}) + this.makecnt++ + } + + this.timeout.Reset(time.Minute) + e := this.que.Front() + select { + case d := <-this.give: + this.timeout.Stop() + this.que.PushFront(element{when: time.Now(), data: d}) + case this.get <- e.Value.(element).data: + this.timeout.Stop() + this.que.Remove(e) + case <-this.timeout.C: + e := this.que.Front() + for e != nil { + n := e.Next() + if time.Since(e.Value.(element).when) > time.Minute { + this.que.Remove(e) + e.Value = nil + this.makecnt-- + } + e = n + } + } + } +} + +func (this *Recycler) Get() interface{} { + i := <-this.get + return i +} + +func (this *Recycler) Give(i interface{}) { + this.give <- i +} + +func (this *Recycler) Close() { + this.running = false +} diff --git a/core/container/recycler/recycler_bytebuf.go b/core/container/recycler/recycler_bytebuf.go new file mode 100644 index 0000000..c187105 --- /dev/null +++ b/core/container/recycler/recycler_bytebuf.go @@ -0,0 +1,28 @@ +package recycler + +import ( + "bytes" +) + +const ( + BytebufRecyclerBacklog int = 128 +) + +var BytebufRecycler = NewRecycler( + BytebufRecyclerBacklog, + func() interface{} { + return bytes.NewBuffer(nil) + }, + "bytebuf_recycler", +) + +func AllocBytebuf() *bytes.Buffer { + b := BytebufRecycler.Get() + buf := b.(*bytes.Buffer) + buf.Reset() + return buf +} + +func FreeBytebuf(buf *bytes.Buffer) { + BytebufRecycler.Give(buf) +} diff --git 
a/core/container/recycler/recycler_mgr.go b/core/container/recycler/recycler_mgr.go new file mode 100644 index 0000000..696c89d --- /dev/null +++ b/core/container/recycler/recycler_mgr.go @@ -0,0 +1,45 @@ +package recycler + +import ( + "fmt" + "io" + "sync" +) + +var RecyclerMgr = &recyclerMgr{ + recyclers: make(map[interface{}]*Recycler), + lock: new(sync.Mutex), +} + +type recyclerMgr struct { + recyclers map[interface{}]*Recycler + lock *sync.Mutex +} + +func (this *recyclerMgr) registe(r *Recycler) { + this.lock.Lock() + this.recyclers[r] = r + this.lock.Unlock() +} + +func (this *recyclerMgr) unregiste(r *Recycler) { + this.lock.Lock() + delete(this.recyclers, r) + this.lock.Unlock() +} + +func (this *recyclerMgr) CloseAll() { + this.lock.Lock() + defer this.lock.Unlock() + for _, r := range this.recyclers { + r.Close() + } +} + +func (this *recyclerMgr) Dump(w io.Writer) { + this.lock.Lock() + for _, r := range this.recyclers { + w.Write([]byte(fmt.Sprintf("(%s) alloc object (%d)", r.name, r.makecnt))) + } + this.lock.Unlock() +} diff --git a/core/container/recycler/recycler_test.go b/core/container/recycler/recycler_test.go new file mode 100644 index 0000000..68a70fd --- /dev/null +++ b/core/container/recycler/recycler_test.go @@ -0,0 +1,30 @@ +// recycler_test +package recycler + +import ( + "runtime" + "testing" +) + +func makeBuffer() interface{} { + buf := make([]byte, 0, 1024) + return buf +} + +var MyRecycler = NewRecycler(RecyclerBacklogDefault, makeBuffer, "test") + +func TestGet(t *testing.T) { + MyRecycler.Get() +} + +func TestGive(t *testing.T) { + MyRecycler.Give(nil) +} + +func BenchmarkGet(b *testing.B) { + b.StartTimer() + for i := 0; i < b.N; i++ { + MyRecycler.Get() + } + b.StopTimer() +} diff --git a/core/container/recycler/recycler_timer.go b/core/container/recycler/recycler_timer.go new file mode 100644 index 0000000..c0627f0 --- /dev/null +++ b/core/container/recycler/recycler_timer.go @@ -0,0 +1,29 @@ +package recycler + +import ( + 
"time" +) + +const ( + NewTimerDefaultDuration time.Duration = time.Minute + TimerRecyclerBacklog int = 128 +) + +var TimerRecycler = NewRecycler( + TimerRecyclerBacklog, + func() interface{} { + return time.NewTimer(NewTimerDefaultDuration) + }, + "timer_recycler", +) + +func GetTimer(timeout time.Duration) *time.Timer { + t := TimerRecycler.Get() + timer := t.(*time.Timer) + timer.Reset(timeout) + return timer +} + +func GiveTimer(t *time.Timer) { + TimerRecycler.Give(t) +} diff --git a/core/container/skiplist/skiplist.go b/core/container/skiplist/skiplist.go new file mode 100644 index 0000000..10d66ee --- /dev/null +++ b/core/container/skiplist/skiplist.go @@ -0,0 +1,703 @@ +// Package skiplist implements skip list based maps and sets. +// +// Skip lists are a data structure that can be used in place of +// balanced trees. Skip lists use probabilistic balancing rather than +// strictly enforced balancing and as a result the algorithms for +// insertion and deletion in skip lists are much simpler and +// significantly faster than equivalent algorithms for balanced trees. +// +// Skip lists were first described in Pugh, William (June 1990). "Skip +// lists: a probabilistic alternative to balanced +// trees". Communications of the ACM 33 (6): 668–676 +package skiplist + +import ( + "math/rand" +) + +// p is the fraction of nodes with level i pointers that also have +// level i+1 pointers. p equal to 1/4 is a good value from the point +// of view of speed and space requirements. If variability of running +// times is a concern, 1/2 is a better value for p. +const p = 0.25 + +const DefaultMaxLevel = 32 + +// A node is a container for key-value pairs that are stored in a skip +// list. +type node struct { + forward []*node + span []int + backward *node + key, value interface{} +} + +// next returns the next node in the skip list containing n. 
+func (n *node) next() *node { + if len(n.forward) == 0 { + return nil + } + return n.forward[0] +} + +// previous returns the previous node in the skip list containing n. +func (n *node) previous() *node { + return n.backward +} + +// hasNext returns true if n has a next node. +func (n *node) hasNext() bool { + return n.next() != nil +} + +// hasPrevious returns true if n has a previous node. +func (n *node) hasPrevious() bool { + return n.previous() != nil +} + +// A SkipList is a map-like data structure that maintains an ordered +// collection of key-value pairs. Insertion, lookup, and deletion are +// all O(log n) operations. A SkipList can efficiently store up to +// 2^MaxLevel items. +// +// To iterate over a skip list (where s is a +// *SkipList): +// +// for i := s.Iterator(); i.Next(); { +// // do something with i.Key() and i.Value() +// } +type SkipList struct { + lessThan func(l, r interface{}) bool + header *node + footer *node + length int + // MaxLevel determines how many items the SkipList can store + // efficiently (2^MaxLevel). + // + // It is safe to increase MaxLevel to accomodate more + // elements. If you decrease MaxLevel and the skip list + // already contains nodes on higer levels, the effective + // MaxLevel will be the greater of the new MaxLevel and the + // level of the highest node. + // + // A SkipList with MaxLevel equal to 0 is equivalent to a + // standard linked list and will not have any of the nice + // properties of skip lists (probably not what you want). + MaxLevel int +} + +// Len returns the length of s. +func (s *SkipList) Len() int { + return s.length +} + +// Iterator is an interface that you can use to iterate through the +// skip list (in its entirety or fragments). For an use example, see +// the documentation of SkipList. +// +// Key and Value return the key and the value of the current node. 
+type Iterator interface { + // Next returns true if the iterator contains subsequent elements + // and advances its state to the next element if that is possible. + Next() (ok bool) + // Previous returns true if the iterator contains previous elements + // and rewinds its state to the previous element if that is possible. + Previous() (ok bool) + // Key returns the current key. + Key() interface{} + // Value returns the current value. + Value() interface{} + // Seek reduces iterative seek costs for searching forward into the Skip List + // by remarking the range of keys over which it has scanned before. If the + // requested key occurs prior to the point, the Skip List will start searching + // as a safeguard. It returns true if the key is within the known range of + // the list. + Seek(key interface{}) (ok bool) + // Close this iterator to reap resources associated with it. While not + // strictly required, it will provide extra hints for the garbage collector. + Close() +} + +type iter struct { + current *node + key interface{} + list *SkipList + value interface{} +} + +func (i iter) Key() interface{} { + return i.key +} + +func (i iter) Value() interface{} { + return i.value +} + +func (i *iter) Next() bool { + if !i.current.hasNext() { + return false + } + + i.current = i.current.next() + i.key = i.current.key + i.value = i.current.value + + return true +} + +func (i *iter) Previous() bool { + if !i.current.hasPrevious() { + return false + } + + i.current = i.current.previous() + i.key = i.current.key + i.value = i.current.value + + return true +} + +func (i *iter) Seek(key interface{}) (ok bool) { + current := i.current + list := i.list + + // If the existing iterator outside of the known key range, we should set the + // position back to the beginning of the list. 
+ if current == nil { + current = list.header + } + + // If the target key occurs before the current key, we cannot take advantage + // of the heretofore spent traversal cost to find it; resetting back to the + // beginning is the safest choice. + if current.key != nil && list.lessThan(key, current.key) { + current = list.header + } + + // We should back up to the so that we can seek to our present value if that + // is requested for whatever reason. + if current.backward == nil { + current = list.header + } else { + current = current.backward + } + + current = list.getPath(current, nil, nil, key) + + if current == nil { + return + } + + i.current = current + i.key = current.key + i.value = current.value + + return true +} + +func (i *iter) Close() { + i.key = nil + i.value = nil + i.current = nil + i.list = nil +} + +type rangeIterator struct { + iter + upperLimit interface{} + lowerLimit interface{} +} + +func (i *rangeIterator) Next() bool { + if !i.current.hasNext() { + return false + } + + next := i.current.next() + + if !i.list.lessThan(next.key, i.upperLimit) { + return false + } + + i.current = i.current.next() + i.key = i.current.key + i.value = i.current.value + return true +} + +func (i *rangeIterator) Previous() bool { + if !i.current.hasPrevious() { + return false + } + + previous := i.current.previous() + + if i.list.lessThan(previous.key, i.lowerLimit) { + return false + } + + i.current = i.current.previous() + i.key = i.current.key + i.value = i.current.value + return true +} + +func (i *rangeIterator) Seek(key interface{}) (ok bool) { + if i.list.lessThan(key, i.lowerLimit) { + return + } else if !i.list.lessThan(key, i.upperLimit) { + return + } + + return i.iter.Seek(key) +} + +func (i *rangeIterator) Close() { + i.iter.Close() + i.upperLimit = nil + i.lowerLimit = nil +} + +// Iterator returns an Iterator that will go through all elements s. 
+func (s *SkipList) Iterator() Iterator { + return &iter{ + current: s.header, + list: s, + } +} + +// Seek returns a bidirectional iterator starting with the first element whose +// key is greater or equal to key; otherwise, a nil iterator is returned. +func (s *SkipList) Seek(key interface{}) Iterator { + current := s.getPath(s.header, nil, nil, key) + if current == nil { + return nil + } + + return &iter{ + current: current, + key: current.key, + list: s, + value: current.value, + } +} + +// SeekToFirst returns a bidirectional iterator starting from the first element +// in the list if the list is populated; otherwise, a nil iterator is returned. +func (s *SkipList) SeekToFirst() Iterator { + if s.length == 0 { + return nil + } + + current := s.header.next() + + return &iter{ + current: current, + key: current.key, + list: s, + value: current.value, + } +} + +// SeekToLast returns a bidirectional iterator starting from the last element +// in the list if the list is populated; otherwise, a nil iterator is returned. +func (s *SkipList) SeekToLast() Iterator { + current := s.footer + if current == nil { + return nil + } + + return &iter{ + current: current, + key: current.key, + list: s, + value: current.value, + } +} + +// Range returns an iterator that will go through all the +// elements of the skip list that are greater or equal than from, but +// less than to. +func (s *SkipList) Range(from, to interface{}) Iterator { + start := s.getPath(s.header, nil, nil, from) + return &rangeIterator{ + iter: iter{ + current: &node{ + forward: []*node{start}, + backward: start, + }, + list: s, + }, + upperLimit: to, + lowerLimit: from, + } +} + +func (s *SkipList) level() int { + return len(s.header.forward) - 1 +} + +func maxInt(x, y int) int { + if x > y { + return x + } + return y +} + +func (s *SkipList) effectiveMaxLevel() int { + return maxInt(s.level(), s.MaxLevel) +} + +// Returns a new random level. 
+func (s SkipList) randomLevel() (n int) { + for n = 0; n < s.effectiveMaxLevel() && rand.Float64() < p; n++ { + } + return +} + +// Get returns the value associated with key from s (nil if the key is +// not present in s). The second return value is true when the key is +// present. +func (s *SkipList) Get(key interface{}) (value interface{}, ok bool) { + candidate := s.getPath(s.header, nil, nil, key) + + if candidate == nil || candidate.key != key { + return nil, false + } + + return candidate.value, true +} + +// GetGreaterOrEqual finds the node whose key is greater than or equal +// to min. It returns its value, its actual key, and whether such a +// node is present in the skip list. +func (s *SkipList) GetGreaterOrEqual(min interface{}) (actualKey, value interface{}, ok bool) { + candidate := s.getPath(s.header, nil, nil, min) + + if candidate != nil { + return candidate.key, candidate.value, true + } + return nil, nil, false +} + +// getPath populates update with nodes that constitute the path to the +// node that may contain key. The candidate node will be returned. If +// update is nil, it will be left alone (the candidate node will still +// be returned). If update is not nil, but it doesn't have enough +// slots for all the nodes in the path, getPath will panic. +func (s *SkipList) getPath(current *node, update []*node, rank []int, key interface{}) *node { + depth := len(current.forward) - 1 + + for i := depth; i >= 0; i-- { + if rank != nil { + if i != depth { + rank[i] = rank[i+1] + } + } + for current.forward[i] != nil && s.lessThan(current.forward[i].key, key) { + if rank != nil { + rank[i] += current.span[i] + } + current = current.forward[i] + } + if update != nil { + update[i] = current + } + + } + return current.next() +} + +// Sets set the value associated with key in s. +func (s *SkipList) Set(key, value interface{}) { + if key == nil { + panic("goskiplist: nil keys are not supported") + } + // s.level starts from 0, so we need to allocate one. 
+ update := make([]*node, s.level()+1, s.effectiveMaxLevel()+1) + rank := make([]int, s.level()+1, s.effectiveMaxLevel()+1) + candidate := s.getPath(s.header, update, rank, key) + + if candidate != nil && candidate.key == key { + candidate.value = value + return + } + + newLevel := s.randomLevel() + + if currentLevel := s.level(); newLevel > currentLevel { + // there are no pointers for the higher levels in + // update. Header should be there. Also add higher + // level links to the header. + for i := currentLevel + 1; i <= newLevel; i++ { + update = append(update, s.header) + rank = append(rank, 0) + s.header.forward = append(s.header.forward, nil) + s.header.span = append(s.header.span, s.length) + } + } + + newNode := &node{ + forward: make([]*node, newLevel+1, s.effectiveMaxLevel()+1), + span: make([]int, newLevel+1, s.effectiveMaxLevel()+1), + key: key, + value: value, + } + + if previous := update[0]; previous.key != nil { + newNode.backward = previous + } + + for i := 0; i <= newLevel; i++ { + newNode.forward[i] = update[i].forward[i] + update[i].forward[i] = newNode + newNode.span[i] = update[i].span[i] - (rank[0] - rank[i]) + update[i].span[i] = (rank[0] - rank[i]) + 1 + } + + for i := newLevel + 1; i <= s.level(); i++ { + update[i].span[i]++ + } + + s.length++ + + if newNode.forward[0] != nil { + if newNode.forward[0].backward != newNode { + newNode.forward[0].backward = newNode + } + } + + if s.footer == nil || s.lessThan(s.footer.key, key) { + s.footer = newNode + } +} + +// Delete removes the node with the given key. +// +// It returns the old value and whether the node was present. 
+func (s *SkipList) Delete(key interface{}) (value interface{}, ok bool) {
+	if key == nil {
+		panic("goskiplist: nil keys are not supported")
+	}
+	update := make([]*node, s.level()+1, s.effectiveMaxLevel())
+	candidate := s.getPath(s.header, update, nil, key)
+
+	if candidate == nil || candidate.key != key {
+		return nil, false
+	}
+
+	previous := candidate.backward
+	if s.footer == candidate {
+		s.footer = previous
+	}
+
+	next := candidate.next()
+	if next != nil {
+		next.backward = previous
+	}
+
+	for i := 0; i <= s.level(); i++ {
+		if update[i].forward[i] == candidate {
+			update[i].span[i] += candidate.span[i] - 1
+			update[i].forward[i] = candidate.forward[i]
+		} else {
+			update[i].span[i]--
+		}
+	}
+
+	for s.level() > 0 && s.header.forward[s.level()] == nil {
+		s.header.forward = s.header.forward[:s.level()]
+	}
+	s.length--
+
+	return candidate.value, true
+}
+
+// GetRank finds the rank for the element with the given key.
+//
+// Returns 0 when the element cannot be found, rank otherwise.
+//
+// Note that the rank is 1-based due to the span of s.header to the
+// first element.
+//
+func (s *SkipList) GetRank(key interface{}) int {
+	var rank int
+	depth := len(s.header.forward) - 1
+	current := s.header
+	for i := depth; i >= 0; i-- {
+		for current.forward[i] != nil && s.lessThan(current.forward[i].key, key) {
+			rank += current.span[i]
+			current = current.forward[i]
+		}
+	}
+
+	if current != nil {
+		current = current.next()
+	}
+	if current != nil && current.key == key {
+		return rank + 1
+	}
+
+	return 0
+}
+
+// GetElementByRank finds an element by its rank. The rank argument needs to be 1-based.
+func (s *SkipList) GetElementByRank(rank int) (interface{}, bool) {
+	var traversed int
+	depth := len(s.header.forward) - 1
+	current := s.header
+	for i := depth; i >= 0; i-- {
+		for current.forward[i] != nil && (traversed+current.span[i] <= rank) {
+			traversed += current.span[i]
+			current = current.forward[i]
+		}
+
+		if traversed == rank {
+			return current.value, true
+		}
+	}
+
+	return nil, false
+}
+
+// NewCustomMap returns a new SkipList that will use lessThan as the
+// comparison function. lessThan should define a linear order on keys
+// you intend to use with the SkipList.
+func NewCustomMap(lessThan func(l, r interface{}) bool) *SkipList {
+	return &SkipList{
+		lessThan: lessThan,
+		header: &node{
+			forward: []*node{nil},
+			span:    []int{0},
+		},
+		MaxLevel: DefaultMaxLevel,
+	}
+}
+
+// Ordered is an interface which can be linearly ordered by the
+// LessThan method, whereby this instance is deemed to be less than
+// other. Additionally, Ordered instances should behave properly when
+// compared using == and !=.
+type Ordered interface {
+	LessThan(other Ordered) bool
+}
+
+// New returns a new SkipList.
+//
+// Its keys must implement the Ordered interface.
+func New() *SkipList {
+	comparator := func(left, right interface{}) bool {
+		return left.(Ordered).LessThan(right.(Ordered))
+	}
+	return NewCustomMap(comparator)
+
+}
+
+// NewIntMap returns a SkipList that accepts int keys.
+func NewIntMap() *SkipList {
+	return NewCustomMap(func(l, r interface{}) bool {
+		return l.(int) < r.(int)
+	})
+}
+
+// NewStringMap returns a SkipList that accepts string keys.
+func NewStringMap() *SkipList {
+	return NewCustomMap(func(l, r interface{}) bool {
+		return l.(string) < r.(string)
+	})
+}
+
+// Set is an ordered set data structure.
+//
+// Its elements must implement the Ordered interface. It uses a
+// SkipList for storage, and it gives you similar performance
+// guarantees.
+// +// To iterate over a set (where s is a *Set): +// +// for i := s.Iterator(); i.Next(); { +// // do something with i.Key(). +// // i.Value() will be nil. +// } +type Set struct { + skiplist SkipList +} + +// NewSet returns a new Set. +func NewSet() *Set { + comparator := func(left, right interface{}) bool { + return left.(Ordered).LessThan(right.(Ordered)) + } + return NewCustomSet(comparator) +} + +// NewCustomSet returns a new Set that will use lessThan as the +// comparison function. lessThan should define a linear order on +// elements you intend to use with the Set. +func NewCustomSet(lessThan func(l, r interface{}) bool) *Set { + return &Set{skiplist: SkipList{ + lessThan: lessThan, + header: &node{ + forward: []*node{nil}, + span: []int{0}, + }, + MaxLevel: DefaultMaxLevel, + }} +} + +// NewIntSet returns a new Set that accepts int elements. +func NewIntSet() *Set { + return NewCustomSet(func(l, r interface{}) bool { + return l.(int) < r.(int) + }) +} + +// NewStringSet returns a new Set that accepts string elements. +func NewStringSet() *Set { + return NewCustomSet(func(l, r interface{}) bool { + return l.(string) < r.(string) + }) +} + +// Add adds key to s. +func (s *Set) Add(key interface{}) { + s.skiplist.Set(key, nil) +} + +// Remove tries to remove key from the set. It returns true if key was +// present. +func (s *Set) Remove(key interface{}) (ok bool) { + _, ok = s.skiplist.Delete(key) + return ok +} + +// Len returns the length of the set. +func (s *Set) Len() int { + return s.skiplist.Len() +} + +// Contains returns true if key is present in s. +func (s *Set) Contains(key interface{}) bool { + _, ok := s.skiplist.Get(key) + return ok +} + +func (s *Set) Iterator() Iterator { + return s.skiplist.Iterator() +} + +// Range returns an iterator that will go through all the elements of +// the set that are greater or equal than from, but less than to. 
+func (s *Set) Range(from, to interface{}) Iterator {
+	return s.skiplist.Range(from, to)
+}
+
+// SetMaxLevel sets MaxLevel in the underlying skip list.
+func (s *Set) SetMaxLevel(newMaxLevel int) {
+	s.skiplist.MaxLevel = newMaxLevel
+}
+
+// GetMaxLevel returns MaxLevel of the underlying skip list.
+func (s *Set) GetMaxLevel() int {
+	return s.skiplist.MaxLevel
+}
diff --git a/core/container/skiplist/skiplist_test.go b/core/container/skiplist/skiplist_test.go
new file mode 100644
index 0000000..283360c
--- /dev/null
+++ b/core/container/skiplist/skiplist_test.go
@@ -0,0 +1,950 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Author: Ric Szopa (Ryszard)

+// Package skiplist implements skip list based maps and sets.
+//
+// Skip lists are a data structure that can be used in place of
+// balanced trees. Skip lists use probabilistic balancing rather than
+// strictly enforced balancing and as a result the algorithms for
+// insertion and deletion in skip lists are much simpler and
+// significantly faster than equivalent algorithms for balanced trees.
+//
+// Skip lists were first described in Pugh, William (June 1990). "Skip
+// lists: a probabilistic alternative to balanced
+// trees".
Communications of the ACM 33 (6): 668–676 +package skiplist + +import ( + "fmt" + "math/rand" + "sort" + "testing" +) + +func (s *SkipList) printRepr() { + + fmt.Printf("header:\n") + for i, link := range s.header.forward { + if link != nil { + fmt.Printf("\t%d: -> %v\n", i, link.key) + } else { + fmt.Printf("\t%d: -> END\n", i) + } + } + + for node := s.header.next(); node != nil; node = node.next() { + fmt.Printf("%v: %v (level %d)\n", node.key, node.value, len(node.forward)) + for i, link := range node.forward { + if link != nil { + fmt.Printf("\t%d: -> %v\n", i, link.key) + } else { + fmt.Printf("\t%d: -> END\n", i) + } + } + } + fmt.Println() +} + +func TestInitialization(t *testing.T) { + s := NewCustomMap(func(l, r interface{}) bool { + return l.(int) < r.(int) + }) + if !s.lessThan(1, 2) { + t.Errorf("Less than doesn't work correctly.") + } +} + +func TestEmptyNodeNext(t *testing.T) { + n := new(node) + if next := n.next(); next != nil { + t.Errorf("Next() should be nil for an empty node.") + } + + if n.hasNext() { + t.Errorf("hasNext() should be false for an empty node.") + } +} + +func TestEmptyNodePrev(t *testing.T) { + n := new(node) + if previous := n.previous(); previous != nil { + t.Errorf("Previous() should be nil for an empty node.") + } + + if n.hasPrevious() { + t.Errorf("hasPrevious() should be false for an empty node.") + } +} + +func TestNodeHasNext(t *testing.T) { + s := NewIntMap() + s.Set(0, 0) + node := s.header.next() + if node.key != 0 { + t.Fatalf("We got the wrong node: %v.", node) + } + + if node.hasNext() { + t.Errorf("%v should be the last node.", node) + } +} + +func TestNodeHasPrev(t *testing.T) { + s := NewIntMap() + s.Set(0, 0) + node := s.header.previous() + if node != nil { + t.Fatalf("Expected no previous entry, got %v.", node) + } +} + +func (s *SkipList) check(t *testing.T, key, wanted int) { + if got, _ := s.Get(key); got != wanted { + t.Errorf("For key %v wanted value %v, got %v.", key, wanted, got) + } +} + +func 
TestGet(t *testing.T) { + s := NewIntMap() + s.Set(0, 0) + + if value, present := s.Get(0); !(value == 0 && present) { + t.Errorf("%v, %v instead of %v, %v", value, present, 0, true) + } + + if value, present := s.Get(100); value != nil || present { + t.Errorf("%v, %v instead of %v, %v", value, present, nil, false) + } +} + +func TestGetGreaterOrEqual(t *testing.T) { + s := NewIntMap() + + if _, value, present := s.GetGreaterOrEqual(5); !(value == nil && !present) { + t.Errorf("s.GetGreaterOrEqual(5) should have returned nil and false for an empty map, not %v and %v.", value, present) + } + + s.Set(0, 0) + + if _, value, present := s.GetGreaterOrEqual(5); !(value == nil && !present) { + t.Errorf("s.GetGreaterOrEqual(5) should have returned nil and false for an empty map, not %v and %v.", value, present) + } + + s.Set(10, 10) + + if key, value, present := s.GetGreaterOrEqual(5); !(value == 10 && key == 10 && present) { + t.Errorf("s.GetGreaterOrEqual(5) should have returned 10 and true, not %v and %v.", value, present) + } +} + +func TestSet(t *testing.T) { + s := NewIntMap() + if l := s.Len(); l != 0 { + t.Errorf("Len is not 0, it is %v", l) + } + + s.Set(0, 0) + s.Set(1, 1) + if l := s.Len(); l != 2 { + t.Errorf("Len is not 2, it is %v", l) + } + s.check(t, 0, 0) + if t.Failed() { + t.Errorf("header.Next() after s.Set(0, 0) and s.Set(1, 1): %v.", s.header.next()) + } + s.check(t, 1, 1) + +} + +func TestChange(t *testing.T) { + s := NewIntMap() + s.Set(0, 0) + s.Set(1, 1) + s.Set(2, 2) + + s.Set(0, 7) + if value, _ := s.Get(0); value != 7 { + t.Errorf("Value should be 7, not %d", value) + } + s.Set(1, 8) + if value, _ := s.Get(1); value != 8 { + t.Errorf("Value should be 8, not %d", value) + } + +} + +func TestDelete(t *testing.T) { + s := NewIntMap() + for i := 0; i < 10; i++ { + s.Set(i, i) + } + for i := 0; i < 10; i += 2 { + s.Delete(i) + } + + for i := 0; i < 10; i += 2 { + if _, present := s.Get(i); present { + t.Errorf("%d should not be present in s", i) + } 
+ } + + if v, present := s.Delete(10000); v != nil || present { + t.Errorf("Deleting a non-existent key should return nil, false, and not %v, %v.", v, present) + } + + if t.Failed() { + s.printRepr() + } + +} + +func TestLen(t *testing.T) { + s := NewIntMap() + for i := 0; i < 10; i++ { + s.Set(i, i) + } + if length := s.Len(); length != 10 { + t.Errorf("Length should be equal to 10, not %v.", length) + s.printRepr() + } + for i := 0; i < 5; i++ { + s.Delete(i) + } + if length := s.Len(); length != 5 { + t.Errorf("Length should be equal to 5, not %v.", length) + } + + s.Delete(10000) + + if length := s.Len(); length != 5 { + t.Errorf("Length should be equal to 5, not %v.", length) + } + +} + +func TestIteration(t *testing.T) { + s := NewIntMap() + for i := 0; i < 20; i++ { + s.Set(i, i) + } + + seen := 0 + var lastKey int + + i := s.Iterator() + defer i.Close() + + for i.Next() { + seen++ + lastKey = i.Key().(int) + if i.Key() != i.Value() { + t.Errorf("Wrong value for key %v: %v.", i.Key(), i.Value()) + } + } + + if seen != s.Len() { + t.Errorf("Not all the items in s where iterated through (seen %d, should have seen %d). 
Last one seen was %d.", seen, s.Len(), lastKey) + } + + for i.Previous() { + if i.Key() != i.Value() { + t.Errorf("Wrong value for key %v: %v.", i.Key(), i.Value()) + } + + if i.Key().(int) >= lastKey { + t.Errorf("Expected key to descend but ascended from %v to %v.", lastKey, i.Key()) + } + + lastKey = i.Key().(int) + } + + if lastKey != 0 { + t.Errorf("Expected to count back to zero, but stopped at key %v.", lastKey) + } +} + +func TestRangeIteration(t *testing.T) { + s := NewIntMap() + for i := 0; i < 20; i++ { + s.Set(i, i) + } + + max, min := 0, 100000 + var lastKey, seen int + + i := s.Range(5, 10) + defer i.Close() + + for i.Next() { + seen++ + lastKey = i.Key().(int) + if lastKey > max { + max = lastKey + } + if lastKey < min { + min = lastKey + } + if i.Key() != i.Value() { + t.Errorf("Wrong value for key %v: %v.", i.Key(), i.Value()) + } + } + + if seen != 5 { + t.Errorf("The number of items yielded is incorrect (should be 5, was %v)", seen) + } + if min != 5 { + t.Errorf("The smallest element should have been 5, not %v", min) + } + + if max != 9 { + t.Errorf("The largest element should have been 9, not %v", max) + } + + if i.Seek(4) { + t.Error("Allowed to seek to invalid range.") + } + + if !i.Seek(5) { + t.Error("Could not seek to an allowed range.") + } + if i.Key().(int) != 5 || i.Value().(int) != 5 { + t.Errorf("Expected 5 for key and 5 for value, got %d and %d", i.Key(), i.Value()) + } + + if !i.Seek(7) { + t.Error("Could not seek to an allowed range.") + } + if i.Key().(int) != 7 || i.Value().(int) != 7 { + t.Errorf("Expected 7 for key and 7 for value, got %d and %d", i.Key(), i.Value()) + } + + if i.Seek(10) { + t.Error("Allowed to seek to invalid range.") + } + + i.Seek(9) + + seen = 0 + min = 100000 + max = -1 + + for i.Previous() { + seen++ + lastKey = i.Key().(int) + if lastKey > max { + max = lastKey + } + if lastKey < min { + min = lastKey + } + if i.Key() != i.Value() { + t.Errorf("Wrong value for key %v: %v.", i.Key(), i.Value()) + } + } 
+ + if seen != 4 { + t.Errorf("The number of items yielded is incorrect (should be 5, was %v)", seen) + } + if min != 5 { + t.Errorf("The smallest element should have been 5, not %v", min) + } + + if max != 8 { + t.Errorf("The largest element should have been 9, not %v", max) + } +} + +func TestSomeMore(t *testing.T) { + s := NewIntMap() + insertions := [...]int{4, 1, 2, 9, 10, 7, 3} + for _, i := range insertions { + s.Set(i, i) + } + for _, i := range insertions { + s.check(t, i, i) + } + +} + +func makeRandomList(n int) *SkipList { + s := NewIntMap() + for i := 0; i < n; i++ { + insert := rand.Int() + s.Set(insert, insert) + } + return s +} + +func LookupBenchmark(b *testing.B, n int) { + b.StopTimer() + s := makeRandomList(n) + b.StartTimer() + for i := 0; i < b.N; i++ { + s.Get(rand.Int()) + } +} + +func SetBenchmark(b *testing.B, n int) { + b.StopTimer() + values := []int{} + for i := 0; i < b.N; i++ { + values = append(values, rand.Int()) + } + s := NewIntMap() + b.StartTimer() + for i := 0; i < b.N; i++ { + s.Set(values[i], values[i]) + } +} + +// Make sure that all the keys are unique and are returned in order. +func TestSanity(t *testing.T) { + s := NewIntMap() + for i := 0; i < 10000; i++ { + insert := rand.Int() + s.Set(insert, insert) + } + var last int = 0 + + i := s.Iterator() + defer i.Close() + + for i.Next() { + if last != 0 && i.Key().(int) <= last { + t.Errorf("Not in order!") + } + last = i.Key().(int) + } + + for i.Previous() { + if last != 0 && i.Key().(int) > last { + t.Errorf("Not in order!") + } + last = i.Key().(int) + } +} + +type MyOrdered struct { + value int +} + +func (me MyOrdered) LessThan(other Ordered) bool { + return me.value < other.(MyOrdered).value +} + +func TestOrdered(t *testing.T) { + s := New() + s.Set(MyOrdered{0}, 0) + s.Set(MyOrdered{1}, 1) + + if val, _ := s.Get(MyOrdered{0}); val != 0 { + t.Errorf("Wrong value for MyOrdered{0}. 
Should have been %d.", val) + } +} + +func TestNewStringMap(t *testing.T) { + s := NewStringMap() + s.Set("a", 1) + s.Set("b", 2) + if value, _ := s.Get("a"); value != 1 { + t.Errorf("Expected 1, got %v.", value) + } +} + +func TestGetNilKey(t *testing.T) { + s := NewStringMap() + if v, present := s.Get(nil); v != nil || present { + t.Errorf("s.Get(nil) should return nil, false (not %v, %v).", v, present) + } + +} + +func TestSetNilKey(t *testing.T) { + s := NewStringMap() + + defer func() { + if err := recover(); err == nil { + t.Errorf("s.Set(nil, 0) should have panicked.") + } + }() + + s.Set(nil, 0) + +} + +func TestSetMaxLevelInFlight(t *testing.T) { + s := NewIntMap() + s.MaxLevel = 2 + for i := 0; i < 64; i++ { + insert := 2 * rand.Int() + s.Set(insert, insert) + } + + s.MaxLevel = 64 + for i := 0; i < 65536; i++ { + insert := 2*rand.Int() + 1 + s.Set(insert, insert) + } + + i := s.Iterator() + defer i.Close() + + for i.Next() { + if v, _ := s.Get(i.Key()); v != i.Key() { + t.Errorf("Bad values in the skip list (%v). 
Inserted before the call to s.SetMax(): %t.", v, i.Key().(int)%2 == 0) + } + } +} + +func TestDeletingHighestLevelNodeDoesntBreakSkiplist(t *testing.T) { + s := NewIntMap() + elements := []int{1, 3, 5, 7, 0, 4, 5, 10, 11} + + for _, i := range elements { + s.Set(i, i) + } + + highestLevelNode := s.header.forward[len(s.header.forward)-1] + + s.Delete(highestLevelNode.key) + + seen := 0 + i := s.Iterator() + defer i.Close() + + for i.Next() { + seen++ + } + if seen == 0 { + t.Errorf("Iteration is broken (no elements seen).") + } +} + +func TestNewSet(t *testing.T) { + set := NewIntSet() + elements := []int{1, 3, 5, 7, 0, 4, 5} + + for _, i := range elements { + set.Add(i) + } + + if length := set.Len(); length != 6 { + t.Errorf("set.Len() should be equal to 6, not %v.", length) + } + + if !set.Contains(3) { + t.Errorf("set should contain 3.") + } + + if set.Contains(1000) { + t.Errorf("set should not contain 1000.") + } + + removed := set.Remove(1) + + if !removed { + t.Errorf("Remove returned false for element that was present in set.") + } + + seen := 0 + i := set.Iterator() + defer i.Close() + + for i.Next() { + seen++ + } + + if seen != 5 { + t.Errorf("Iterator() iterated through %v elements. 
Should have been 5.", seen) + } + + if set.Contains(1) { + t.Errorf("1 was removed, set should not contain 1.") + } + + if length := set.Len(); length != 5 { + t.Errorf("After removing one element, set.Len() should be equal to 5, not %v.", length) + } + + set.SetMaxLevel(10) + if ml := set.GetMaxLevel(); ml != 10 { + t.Errorf("MaxLevel for set should be 10, not %v", ml) + } + +} + +func TestSetRangeIterator(t *testing.T) { + set := NewIntSet() + elements := []int{0, 1, 3, 5} + + for _, i := range elements { + set.Add(i) + } + + seen := 0 + for i := set.Range(2, 1000); i.Next(); { + seen++ + } + if seen != 2 { + t.Errorf("There should have been 2 elements in Range(2, 1000), not %v.", seen) + } + +} + +func TestNewStringSet(t *testing.T) { + set := NewStringSet() + strings := []string{"ala", "ma", "kota"} + for _, v := range strings { + set.Add(v) + } + + if !set.Contains("ala") { + t.Errorf("set should contain \"ala\".") + } +} + +func TestIteratorPrevHoles(t *testing.T) { + m := NewIntMap() + + i := m.Iterator() + defer i.Close() + + m.Set(0, 0) + m.Set(1, 1) + m.Set(2, 2) + + if !i.Next() { + t.Errorf("Expected iterator to move successfully to the next.") + } + + if !i.Next() { + t.Errorf("Expected iterator to move successfully to the next.") + } + + if !i.Next() { + t.Errorf("Expected iterator to move successfully to the next.") + } + + if i.Key().(int) != 2 || i.Value().(int) != 2 { + t.Errorf("Expected iterator to reach key 2 and value 2, got %v and %v.", i.Key(), i.Value()) + } + + if !i.Previous() { + t.Errorf("Expected iterator to move successfully to the previous.") + } + + if i.Key().(int) != 1 || i.Value().(int) != 1 { + t.Errorf("Expected iterator to reach key 1 and value 1, got %v and %v.", i.Key(), i.Value()) + } + + if !i.Next() { + t.Errorf("Expected iterator to move successfully to the next.") + } + + m.Delete(1) + + if !i.Previous() { + t.Errorf("Expected iterator to move successfully to the previous.") + } + + if i.Key().(int) != 0 || 
i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } +} + +func TestIteratorSeek(t *testing.T) { + m := NewIntMap() + + i := m.Seek(0) + + if i != nil { + t.Errorf("Expected nil iterator, but got %v.", i) + } + + i = m.SeekToFirst() + + if i != nil { + t.Errorf("Expected nil iterator, but got %v.", i) + } + + i = m.SeekToLast() + + if i != nil { + t.Errorf("Expected nil iterator, but got %v.", i) + } + + m.Set(0, 0) + + i = m.SeekToFirst() + defer i.Close() + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } + + i = m.SeekToLast() + defer i.Close() + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } + + m.Set(1, 1) + + i = m.SeekToFirst() + defer i.Close() + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } + + i = m.SeekToLast() + defer i.Close() + + if i.Key().(int) != 1 || i.Value().(int) != 1 { + t.Errorf("Expected iterator to reach key 1 and value 1, got %v and %v.", i.Key(), i.Value()) + } + + m.Set(2, 2) + + i = m.SeekToFirst() + defer i.Close() + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } + + i = m.SeekToLast() + defer i.Close() + + if i.Key().(int) != 2 || i.Value().(int) != 2 { + t.Errorf("Expected iterator to reach key 2 and value 2, got %v and %v.", i.Key(), i.Value()) + } + + i = m.Seek(0) + defer i.Close() + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } + + i = m.Seek(2) + defer i.Close() + + if i.Key().(int) != 2 || i.Value().(int) != 2 { + t.Errorf("Expected iterator to reach key 2 and 
value 2, got %v and %v.", i.Key(), i.Value()) + } + + i = m.Seek(1) + defer i.Close() + + if i.Key().(int) != 1 || i.Value().(int) != 1 { + t.Errorf("Expected iterator to reach key 1 and value 1, got %v and %v.", i.Key(), i.Value()) + } + + i = m.Seek(3) + + if i != nil { + t.Errorf("Expected to receive nil iterator, got %v.", i) + } + + m.Set(4, 4) + + i = m.Seek(4) + defer i.Close() + + if i.Key().(int) != 4 || i.Value().(int) != 4 { + t.Errorf("Expected iterator to reach key 4 and value 4, got %v and %v.", i.Key(), i.Value()) + } + + i = m.Seek(3) + defer i.Close() + + if i.Key().(int) != 4 || i.Value().(int) != 4 { + t.Errorf("Expected iterator to reach key 4 and value 4, got %v and %v.", i.Key(), i.Value()) + } + + m.Delete(4) + + i = m.SeekToFirst() + defer i.Close() + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } + + i = m.SeekToLast() + defer i.Close() + + if i.Key().(int) != 2 || i.Value().(int) != 2 { + t.Errorf("Expected iterator to reach key 2 and value 2, got %v and %v.", i.Key(), i.Value()) + } + + if !i.Seek(2) { + t.Error("Expected iterator to seek to key.") + } + + if i.Key().(int) != 2 || i.Value().(int) != 2 { + t.Errorf("Expected iterator to reach key 2 and value 2, got %v and %v.", i.Key(), i.Value()) + } + + if !i.Seek(1) { + t.Error("Expected iterator to seek to key.") + } + + if i.Key().(int) != 1 || i.Value().(int) != 1 { + t.Errorf("Expected iterator to reach key 1 and value 1, got %v and %v.", i.Key(), i.Value()) + } + + if !i.Seek(0) { + t.Error("Expected iterator to seek to key.") + } + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } + + i = m.SeekToFirst() + defer i.Close() + + if !i.Seek(0) { + t.Error("Expected iterator to seek to key.") + } + + if i.Key().(int) != 0 || i.Value().(int) != 0 { + t.Errorf("Expected iterator to reach 
key 0 and value 0, got %v and %v.", i.Key(), i.Value()) + } +} + +func TestRank(t *testing.T) { + s := NewIntMap() + for i := 0; i < 10; i++ { + s.Set(i, i) + } + + for i := 0; i < 10; i++ { + if rank := s.GetRank(i); rank != (i + 1) { + t.Errorf("%d should be rank %d", i, i+1) + } + } +} + +func TestGetElementByRank(t *testing.T) { + s := NewIntMap() + for i := 0; i < 10; i++ { + s.Set(i, i) + } + + for i := 0; i < 10; i++ { + if v, ok := s.GetElementByRank(i + 1); !ok || v != i { + t.Errorf("%d should be rank %d", i, i+1) + } + } +} + +func BenchmarkLookup16(b *testing.B) { + LookupBenchmark(b, 16) +} + +func BenchmarkLookup256(b *testing.B) { + LookupBenchmark(b, 256) +} + +func BenchmarkLookup65536(b *testing.B) { + LookupBenchmark(b, 65536) +} + +func BenchmarkSet16(b *testing.B) { + SetBenchmark(b, 16) +} + +func BenchmarkSet256(b *testing.B) { + SetBenchmark(b, 256) +} + +func BenchmarkSet65536(b *testing.B) { + SetBenchmark(b, 65536) +} + +func BenchmarkRandomSeek(b *testing.B) { + b.StopTimer() + values := []int{} + s := NewIntMap() + for i := 0; i < b.N; i++ { + r := rand.Int() + values = append(values, r) + s.Set(r, r) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + iterator := s.Seek(values[i]) + if iterator == nil { + b.Errorf("got incorrect value for index %d", i) + } + } +} + +const ( + lookAhead = 10 +) + +// This test is used for the baseline comparison of Iterator.Seek when +// performing forward sequential seek operations. +func BenchmarkForwardSeek(b *testing.B) { + b.StopTimer() + + values := []int{} + s := NewIntMap() + valueCount := b.N + for i := 0; i < valueCount; i++ { + r := rand.Int() + values = append(values, r) + s.Set(r, r) + } + sort.Ints(values) + + b.StartTimer() + for i := 0; i < b.N; i++ { + key := values[i] + iterator := s.Seek(key) + if i < valueCount-lookAhead { + nextKey := values[i+lookAhead] + + iterator = s.Seek(nextKey) + if iterator.Key().(int) != nextKey || iterator.Value().(int) != nextKey { + b.Errorf("%d. 
expected %d key and %d value, got %d key and %d value", i, nextKey, nextKey, iterator.Key(), iterator.Value()) + } + } + } +} + +// This test demonstrates the amortized cost of a forward sequential seek. +func BenchmarkForwardSeekReusedIterator(b *testing.B) { + b.StopTimer() + + values := []int{} + s := NewIntMap() + valueCount := b.N + for i := 0; i < valueCount; i++ { + r := rand.Int() + values = append(values, r) + s.Set(r, r) + + } + sort.Ints(values) + + b.StartTimer() + for i := 0; i < b.N; i++ { + key := values[i] + iterator := s.Seek(key) + if i < valueCount-lookAhead { + nextKey := values[i+lookAhead] + + if !iterator.Seek(nextKey) { + b.Errorf("%d. expected iterator to seek to %d key; failed.", i, nextKey) + } else if iterator.Key().(int) != nextKey || iterator.Value().(int) != nextKey { + b.Errorf("%d. expected %d key and %d value, got %d key and %d value", i, nextKey, nextKey, iterator.Key(), iterator.Value()) + } + } + } +} diff --git a/core/container/synchronizedlist.go b/core/container/synchronizedlist.go new file mode 100644 index 0000000..0e9037a --- /dev/null +++ b/core/container/synchronizedlist.go @@ -0,0 +1,60 @@ +package container + +import ( + "container/list" + "sync" +) + +type SynchronizedList struct { + list *list.List + lock *sync.Mutex +} + +func NewSynchronizedList() *SynchronizedList { + sl := &SynchronizedList{ + list: list.New(), + lock: new(sync.Mutex), + } + return sl +} + +func (sl *SynchronizedList) PushFront(v interface{}) { + sl.lock.Lock() + sl.list.PushFront(v) + sl.lock.Unlock() +} + +func (sl *SynchronizedList) PopFront() (v interface{}) { + sl.lock.Lock() + e := sl.list.Front() + if e != nil { + v = e.Value + sl.list.Remove(e) + } + sl.lock.Unlock() + return v +} + +func (sl *SynchronizedList) PushBack(v interface{}) { + sl.lock.Lock() + sl.list.PushBack(v) + sl.lock.Unlock() +} + +func (sl *SynchronizedList) PopBack() (v interface{}) { + sl.lock.Lock() + e := sl.list.Back() + if e != nil { + v = e.Value + 
sl.list.Remove(e)
+	}
+	sl.lock.Unlock()
+	return v
+}
+
+func (sl *SynchronizedList) Len() (n int) {
+	sl.lock.Lock()
+	n = sl.list.Len()
+	sl.lock.Unlock()
+	return
+}
diff --git a/core/container/synchronizedmap.go b/core/container/synchronizedmap.go
new file mode 100644
index 0000000..438bf5e
--- /dev/null
+++ b/core/container/synchronizedmap.go
@@ -0,0 +1,82 @@
+package container
+
+import (
+	"sync"
+)
+
+type SynchronizedMap struct {
+	lock *sync.RWMutex
+	m    map[interface{}]interface{}
+}
+
+// NewSynchronizedMap returns a new SynchronizedMap.
+func NewSynchronizedMap() *SynchronizedMap {
+	return &SynchronizedMap{
+		lock: new(sync.RWMutex),
+		m:    make(map[interface{}]interface{}),
+	}
+}
+
+// Get returns the value stored for k, or nil when k is not present.
+func (m *SynchronizedMap) Get(k interface{}) interface{} {
+	m.lock.RLock()
+	if val, ok := m.m[k]; ok {
+		m.lock.RUnlock()
+		return val
+	}
+	m.lock.RUnlock()
+	return nil
+}
+
+// Set maps the given key to value. It returns false and changes
+// nothing only when k already maps to this same value; otherwise it
+// stores v and returns true.
+func (m *SynchronizedMap) Set(k interface{}, v interface{}) bool {
+	m.lock.Lock()
+	if val, ok := m.m[k]; !ok {
+		m.m[k] = v
+	} else if val != v {
+		m.m[k] = v
+	} else {
+		m.lock.Unlock()
+		return false
+	}
+	m.lock.Unlock()
+	return true
+}
+
+// IsExist returns true if k exists in the map.
+func (m *SynchronizedMap) IsExist(k interface{}) bool {
+	m.lock.RLock()
+	if _, ok := m.m[k]; !ok {
+		m.lock.RUnlock()
+		return false
+	}
+	m.lock.RUnlock()
+	return true
+}
+
+// Delete removes the entry stored for the given key, if any.
+func (m *SynchronizedMap) Delete(k interface{}) {
+	m.lock.Lock()
+	delete(m.m, k)
+	m.lock.Unlock()
+}
+
+// Items returns all items in SynchronizedMap.
+func (m *SynchronizedMap) Items() map[interface{}]interface{} { + mm := make(map[interface{}]interface{}) + m.lock.RLock() + for k, v := range m.m { + mm[k] = v + } + m.lock.RUnlock() + return mm +} + +func (m *SynchronizedMap) Foreach(cb func(k, v interface{})) { + m.lock.Lock() + defer m.lock.Unlock() + for k, v := range m.m { + cb(k, v) + } +} diff --git a/core/ctx.go b/core/ctx.go new file mode 100644 index 0000000..1533ac6 --- /dev/null +++ b/core/ctx.go @@ -0,0 +1,94 @@ +package core + +import ( + "fmt" + "os" + "path/filepath" + + "mongo.games.com/goserver/core/basic" + "mongo.games.com/goserver/core/utils" +) + +const ( + HOOK_BEFORE_START int = iota + HOOK_AFTER_STOP + HOOK_MAX +) + +var ( + AppCtx *Ctx = newCtx() + hooks [HOOK_MAX][]hookfunc +) + +type hookfunc func() error + +type Ctx struct { + *basic.Object + CoreObj *basic.Object +} + +func newCtx() *Ctx { + ctx := &Ctx{} + ctx.init() + return ctx +} + +func (ctx *Ctx) init() { + ctx.Object = basic.NewObject(ObjId_RootId, + "root", + basic.Options{ + MaxDone: 1024, + QueueBacklog: 1024, + }, + nil) + ctx.Object.Waitor = utils.NewWaitor("core.Ctx") + ctx.UserData = ctx + ctx.Active() +} + +func LaunchChild(o *basic.Object) { + AppCtx.LaunchChild(o) +} + +func Terminate(o *basic.Object) { + AppCtx.Terminate(o) +} + +func CoreObject() *basic.Object { + //return AppCtx.GetChildById(ObjId_CoreId) + return AppCtx.CoreObj +} + +func RegisteHook(hookpos int, f hookfunc) { + if hookpos < 0 || hookpos > HOOK_MAX { + return + } + hooks[hookpos] = append(hooks[hookpos], f) +} + +func ExecuteHook(hookpos int) error { + if hookpos < 0 || hookpos > HOOK_MAX { + return nil + } + var err error + for _, h := range hooks[hookpos] { + err = h() + if err != nil { + return err + } + } + return nil +} + +func WritePid() { + if len(os.Args) > 0 { + baseName := filepath.Base(os.Args[0]) + f, err := os.OpenFile(baseName+".pid", os.O_CREATE|os.O_TRUNC|os.O_RDWR, os.ModePerm) + if err != nil { + panic(fmt.Sprintf("%s had 
running", os.Args[0])) + return + } + + f.WriteString(fmt.Sprintf("%v", os.Getpid())) + } +} diff --git a/core/doc.go b/core/doc.go new file mode 100644 index 0000000..8452228 --- /dev/null +++ b/core/doc.go @@ -0,0 +1,27 @@ +package core + +/* + +core struct + +AppCtx--------------------------------------------- + | | | | + | TimerMgr TaskExecutor Profile + | + AppModules-------------------------------------- + | | | + | | XXX_UserCustomModule + | | + | TransactModule + | + | + NetModule------------------------ + | | + | Connector----------------- + | | | + | Session Socket Connect + | + Acceptor--------------------- + | | + Session0 Session1..n +*/ diff --git a/core/i18n/config.go b/core/i18n/config.go new file mode 100644 index 0000000..cf96986 --- /dev/null +++ b/core/i18n/config.go @@ -0,0 +1,160 @@ +package i18n + +import ( + "crypto/md5" + "encoding/hex" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/howeyc/fsnotify" + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/basic" + "mongo.games.com/goserver/core/logger" +) + +var Config = Configuration{} + +type Configuration struct { + RootPath string + hashCodes map[string]string + watcher *fsnotify.Watcher +} + +func (this *Configuration) Name() string { + return "i18n" +} + +func (this *Configuration) Init() error { + var err error + workDir, err := os.Getwd() + if err != nil { + return err + } + this.RootPath = filepath.Join(workDir, this.RootPath) + this.watcher, err = fsnotify.NewWatcher() + if err != nil { + logger.Logger.Warn(" fsnotify.NewWatcher err:", err) + return err + } + + // Process events + go func() { + defer func() { + if err := recover(); err != nil { + logger.Logger.Warn("watch data director modify goroutine err:", err) + } + }() + for { + select { + case ev := <-this.watcher.Event: + if ev != nil && ev.IsModify() && filepath.Ext(ev.Name) == ".json" { + core.CoreObject().SendCommand(&fileModifiedCommand{fileName: ev.Name}, false) + 
// Close stops the fsnotify watcher that Init installed on RootPath.
// The watcher's own Close error is intentionally not propagated.
func (this *Configuration) Close() error {
	this.watcher.Close()
	return nil
}
// locales is the package-wide registry of loaded languages.
var (
	locales = &localeStore{store: make(map[string]*locale)}
)

// locale bundles one language's message table together with its
// registration index.
type locale struct {
	id      int
	lang    string
	message map[string]string
}

// localeStore keeps every registered locale, indexed both by language
// name (store) and by registration order (langs).
type localeStore struct {
	langs []string
	store map[string]*locale
}

// getLocale looks up the locale registered under lang.
func (d *localeStore) getLocale(lang string) (*locale, bool) {
	l, ok := d.store[lang]
	return l, ok
}

// Get returns the translation of key in lang; ok reports whether both
// the language and the key were found.
func (d *localeStore) Get(lang, key string) (string, bool) {
	lc, ok := d.getLocale(lang)
	if !ok {
		return "", false
	}
	value, found := lc.message[key]
	if !found {
		return "", false
	}
	return value, true
}

// Add registers lc under its language name, assigning it the next
// sequential id. It returns false (and changes nothing) when the
// language is already registered.
func (d *localeStore) Add(lc *locale) bool {
	if _, dup := d.store[lc.lang]; dup {
		return false
	}
	lc.id = len(d.langs)
	d.langs = append(d.langs, lc.lang)
	d.store[lc.lang] = lc
	return true
}

// ListLangs returns a copy of the registered language names in
// registration order.
func ListLangs() []string {
	out := make([]string, len(locales.langs))
	copy(out, locales.langs)
	return out
}

// IsExist reports whether the language name has been registered.
func IsExist(lang string) bool {
	_, ok := locales.getLocale(lang)
	return ok
}

// IndexLang returns the registration index of lang, or -1 when the
// language is unknown.
func IndexLang(lang string) int {
	lc, ok := locales.getLocale(lang)
	if !ok {
		return -1
	}
	return lc.id
}
GetLangByIndex(index int) string { + if index < 0 || index >= len(locales.langs) { + return "" + } + return locales.langs[index] +} + +// A Locale describles the information of localization. +type Locale struct { + Lang string +} + +// Tr translate content to target language. +func (l Locale) Tr(key string, args ...interface{}) string { + return Tr(l.Lang, key, args...) +} + +// Index get lang index of LangStore +func (l Locale) Index() int { + return IndexLang(l.Lang) +} + +// Tr translate content to target language. +func Tr(lang, key string, args ...interface{}) string { + value, ok := locales.Get(lang, key) + if ok && len(args) > 0 { + params := make([]interface{}, 0, len(args)) + for _, arg := range args { + if arg != nil { + val := reflect.ValueOf(arg) + if val.Kind() == reflect.Slice { + for i := 0; i < val.Len(); i++ { + params = append(params, val.Index(i).Interface()) + } + } else { + params = append(params, arg) + } + } + } + return fmt.Sprintf(value, params...) + } + return fmt.Sprintf(value) +} diff --git a/core/ids.go b/core/ids.go new file mode 100644 index 0000000..a1c25ad --- /dev/null +++ b/core/ids.go @@ -0,0 +1,9 @@ +package core + +const ( + ObjId_RootId int = iota + ObjId_CoreId + ObjId_ExecutorId + ObjId_TimerId + ObjId_ProfileId +) diff --git a/core/loader.go b/core/loader.go new file mode 100644 index 0000000..1c233fe --- /dev/null +++ b/core/loader.go @@ -0,0 +1,102 @@ +package core + +import ( + "encoding/json" + "io" + "io/ioutil" + "path" + + "mongo.games.com/goserver/core/logger" +) + +type Package interface { + Name() string + Init() error + io.Closer +} +type ConfigFileEncryptorHook interface { + IsCipherText([]byte) bool + Encrypt([]byte) []byte + Decrtypt([]byte) []byte +} + +var packages = make(map[string]Package) +var packagesLoaded = make(map[string]bool) +var configFileEH ConfigFileEncryptorHook + +func RegistePackage(p Package) { + packages[p.Name()] = p +} + +func IsPackageRegisted(name string) bool { + if _, exist := 
packages[name]; exist { + return true + } + return false +} + +func IsPackageLoaded(name string) bool { + if _, exist := packagesLoaded[name]; exist { + return true + } + return false +} +func RegisterConfigEncryptor(h ConfigFileEncryptorHook) { + configFileEH = h +} +func LoadPackages(configFile string) { + //logger.Logger.Infof("Build time is: %s", BuildTime()) + switch path.Ext(configFile) { + case ".json": + fileBuff, err := ioutil.ReadFile(configFile) + if err != nil { + logger.Logger.Errorf("Error while reading config file %s: %s", configFile, err) + break + } + if configFileEH != nil { + if configFileEH.IsCipherText(fileBuff) { + fileBuff = configFileEH.Decrtypt(fileBuff) + } + } + var fileData interface{} + err = json.Unmarshal(fileBuff, &fileData) + if err != nil { + break + } + fileMap := fileData.(map[string]interface{}) + for name, pkg := range packages { + if moduleData, ok := fileMap[name]; ok { + if data, ok := moduleData.(map[string]interface{}); ok { + modelBuff, _ := json.Marshal(data) + err = json.Unmarshal(modelBuff, &pkg) + if err != nil { + logger.Logger.Errorf("Error while unmarshalling JSON from config file %s: %s", configFile, err) + } else { + err = pkg.Init() + if err != nil { + logger.Logger.Errorf("Error while initializing package %s: %s", pkg.Name(), err) + } else { + packagesLoaded[pkg.Name()] = true + logger.Logger.Infof("package [%16s] load success", pkg.Name()) + } + } + } else { + logger.Logger.Errorf("Package %v init data unmarshal failed.", pkg.Name()) + } + } else { + logger.Logger.Errorf("Package %v init data not exist.", pkg.Name()) + } + } + default: + panic("Unsupported config file: " + configFile) + } +} + +func ClosePackages() { + for _, pkg := range packages { + err := pkg.Close() + if err != nil { + logger.Logger.Errorf("Error while closing package %s: %s", pkg.Name(), err) + } + } +} diff --git a/core/logger/ilogger.go b/core/logger/ilogger.go new file mode 100644 index 0000000..c1e70d5 --- /dev/null +++ 
// ILogger is the minimal logging contract used across the codebase.
// The Xxxf variants take fmt.Sprintf-style format strings; the plain
// variants take fmt.Sprint-style value lists. Warn and above also
// return an error so a call site can log and propagate in one step.
type ILogger interface {
	Tracef(format string, params ...interface{})
	Debugf(format string, params ...interface{})
	Infof(format string, params ...interface{})
	Warnf(format string, params ...interface{}) error
	Errorf(format string, params ...interface{}) error
	Criticalf(format string, params ...interface{}) error

	Trace(v ...interface{})
	Debug(v ...interface{})
	Info(v ...interface{})
	Warn(v ...interface{}) error
	Error(v ...interface{}) error
	Critical(v ...interface{}) error

	// Close shuts the logger down, Flush forces buffered output out,
	// and Closed reports whether Close has been called.
	Close()
	Flush()
	Closed() bool
}
// Criticalf logs a formatted critical-level message through the
// package Logger; it is a nil-safe no-op when the Logger failed to
// initialize.
func Criticalf(format string, params ...interface{}) error {
	if Logger != nil {
		return Logger.Criticalf(format, params...)
	}
	return nil
}

// Trace logs its arguments at trace level (nil-safe).
func Trace(v ...interface{}) {
	if Logger != nil {
		Logger.Trace(v...)
	}
}

// Debug logs its arguments at debug level (nil-safe).
func Debug(v ...interface{}) {
	if Logger != nil {
		Logger.Debug(v...)
	}
}

// Info logs its arguments at info level (nil-safe).
func Info(v ...interface{}) {
	if Logger != nil {
		Logger.Info(v...)
	}
}

// Warn logs its arguments at warn level and returns the logger's
// error value; nil when the Logger is not initialized.
func Warn(v ...interface{}) error {
	if Logger != nil {
		return Logger.Warn(v...)
	}
	return nil
}

// Error logs its arguments at error level and returns the logger's
// error value; nil when the Logger is not initialized.
func Error(v ...interface{}) error {
	if Logger != nil {
		return Logger.Error(v...)
	}
	return nil
}

// Critical logs its arguments at critical level and returns the
// logger's error value; nil when the Logger is not initialized.
func Critical(v ...interface{}) error {
	if Logger != nil {
		return Logger.Critical(v...)
	}
	return nil
}
// newModuleMgr builds the module manager with empty module lists and a
// buffered channel (ModuleMaxCount slots) for shutdown acknowledgements.
func newModuleMgr() *ModuleMgr {
	mm := &ModuleMgr{
		modules:       list.New(),
		preloadModule: list.New(),
		modulesByName: make(map[string]*ModuleEntity),
		waitShutAct:   make(chan interface{}, ModuleMaxCount),
		state:         ModuleStateInvalid,
	}

	return mm
}

// GetCurrTime returns the timestamp captured at the start of the
// current update tick (not the live clock).
func (this *ModuleMgr) GetCurrTime() time.Time {
	return this.currTime
}

// GetCurrTimeSec returns the current tick time as Unix seconds.
func (this *ModuleMgr) GetCurrTimeSec() int64 {
	return this.currTimeSec
}

// GetCurrTimeNano returns the current tick time as Unix nanoseconds.
func (this *ModuleMgr) GetCurrTimeNano() int64 {
	return this.currTimeNano
}
// RegistePreloadModule inserts m into the preload list ordered by
// ascending priority (a lower priority value is started earlier).
func (this *ModuleMgr) RegistePreloadModule(m PreloadModule, priority int) {
	mentiry := &PreloadModuleEntity{
		priority: priority,
		module:   m,
	}

	// Walk the ordered list and insert before the first entry with a
	// larger priority value; append at the tail when none is found.
	for e := this.preloadModule.Front(); e != nil; e = e.Next() {
		if me, ok := e.Value.(*PreloadModuleEntity); ok {
			if priority < me.priority {
				this.preloadModule.InsertBefore(mentiry, e)
				return
			}
		}
	}
	this.preloadModule.PushBack(mentiry)
}

// UnregistePreloadModule removes the entry wrapping m from the preload
// list; it is a no-op when m was never registered.
func (this *ModuleMgr) UnregistePreloadModule(m PreloadModule) {
	for e := this.preloadModule.Front(); e != nil; e = e.Next() {
		if me, ok := e.Value.(*PreloadModuleEntity); ok {
			if me.module == m {
				this.preloadModule.Remove(e)
				return
			}
		}
	}
}
// update refreshes the cached tick timestamps (served by GetCurrTime*)
// and ticks every module that has not yet quit, in priority order.
func (this *ModuleMgr) update() {
	nowTime := time.Now()
	this.currTime = nowTime
	this.currTimeSec = nowTime.Unix()
	this.currTimeNano = nowTime.UnixNano()
	for e := this.modules.Front(); e != nil; e = e.Next() {
		if me, ok := e.Value.(*ModuleEntity); ok && !me.quited {
			me.safeUpt(nowTime)
		}
	}
}

// shutdown asks every module to shut down and counts in waitShutCnt
// how many acknowledgements checkShutdown must still collect. The
// waitShut flag makes this a one-shot; repeat calls return immediately.
func (this *ModuleMgr) shutdown() {
	if this.waitShut {
		return
	}
	logger.Logger.Info("ModuleMgr shutdown()")
	this.waitShut = true
	this.state = ModuleStateWaitShutdown
	for e := this.modules.Front(); e != nil; e = e.Next() {
		if me, ok := e.Value.(*ModuleEntity); ok {
			logger.Logger.Infof("module [%16s] shutdown...", me.module.ModuleName())
			me.safeShutdown(this.waitShutAct)
			logger.Logger.Infof("module [%16s] shutdown[ok]", me.module.ModuleName())
			this.waitShutCnt++
		}
	}
}
// safeInit runs the module's Init with panic protection — a panic is
// logged with a stack dump instead of killing the manager loop.
func (this *ModuleEntity) safeInit() {
	defer utils.DumpStackIfPanic("ModuleEntity.safeInit")
	this.module.Init()
}

// safeUpt calls the module's Update, rate-limited to the entity's
// tickInterval (0 means every tick), and records the call duration
// under /module/<name>/update for profiling. Panics are logged with a
// stack dump.
func (this *ModuleEntity) safeUpt(nowTime time.Time) {
	defer utils.DumpStackIfPanic("ModuleEntity.safeTick")

	if this.tickInterval == 0 || nowTime.Sub(this.lastTick) >= this.tickInterval {
		this.lastTick = nowTime
		watch := profile.TimeStatisticMgr.WatchStart(fmt.Sprintf("/module/%v/update", this.module.ModuleName()), profile.TIME_ELEMENT_MODULE)
		if watch != nil {
			defer watch.Stop()
		}
		this.module.Update()
	}
}

// safeShutdown invokes the module's Shutdown with panic protection.
// NOTE(review): shutWaitAck is accepted but never used here — modules
// appear to acknowledge via the package-level UnregisteModule, which
// pushes onto the same channel; confirm before removing the parameter.
func (this *ModuleEntity) safeShutdown(shutWaitAck chan<- interface{}) {
	defer utils.DumpStackIfPanic("ModuleEntity.safeShutdown")
	this.module.Shutdown()
}
+ logger.Logger.Error("ExecuteHook(HOOK_BEFORE_START) error", err) + } + return AppModule.Start() +} + +func Stop() { + AppModule.Close() + err := core.ExecuteHook(core.HOOK_AFTER_STOP) + if err != nil { + logger.Logger.Error("ExecuteHook(HOOK_BEFORE_START) error", err) + } +} diff --git a/core/module/preload.go b/core/module/preload.go new file mode 100644 index 0000000..5b19d6e --- /dev/null +++ b/core/module/preload.go @@ -0,0 +1,5 @@ +package module + +type PreloadModule interface { + Start() +} diff --git a/core/mongo/config.go b/core/mongo/config.go new file mode 100644 index 0000000..2bce98e --- /dev/null +++ b/core/mongo/config.go @@ -0,0 +1,272 @@ +package mongo + +import ( + "fmt" + "mongo.games.com/goserver/core/logger" + "sync" + "sync/atomic" + "time" + + "github.com/globalsign/mgo" + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/container" +) + +var Config = Configuration{ + Dbs: make(map[string]DbConfig), +} + +var autoPingInterval time.Duration = 30 * time.Second +var mgoSessions = container.NewSynchronizedMap() +var databases = container.NewSynchronizedMap() +var cLock sync.RWMutex +var collections = make(map[string]*Collection) +var resetingSess int32 + +type Configuration struct { + Dbs map[string]DbConfig +} +type DbConfig struct { + Host string + Database string + User string + Password string + Safe mgo.Safe +} + +func (c *Configuration) Name() string { + return "mongo" +} + +func (c *Configuration) Init() error { + //auto ping, ensure net is connected + go func() { + for { + select { + case <-time.After(autoPingInterval): + Ping() + } + } + }() + return nil +} + +func (c *Configuration) Close() error { + sessions := mgoSessions.Items() + for k, s := range sessions { + if session, ok := s.(*mgo.Session); ok && session != nil { + logger.Logger.Warnf("mongo.Close!!! 
// Hold increments the collection's reference count. The first holder
// publishes the collection into the package-level cache keyed by
// "<database name><full collection name>", marking any entry it
// replaces as invalid so stale holders can detect it via IsValid.
func (c *Collection) Hold() {
	if atomic.AddInt32(&c.Ref, 1) == 1 {
		key := c.Database.Name + c.FullName
		cLock.Lock()
		if old, exist := collections[key]; exist {
			old.valid = false
		}
		collections[key] = c
		cLock.Unlock()
	}
}

// Unhold decrements the reference count; the last holder removes the
// collection from the package-level cache.
func (c *Collection) Unhold() {
	if atomic.AddInt32(&c.Ref, -1) == 0 {
		key := c.Database.Name + c.FullName
		cLock.Lock()
		delete(collections, key)
		cLock.Unlock()
	}
}

// IsValid reports whether this cached collection is still current.
// NOTE(review): valid is read here without cLock while writers flip it
// under the lock — a racy read; confirm this best-effort check is the
// intended behavior.
func (c *Collection) IsValid() bool {
	return c.valid
}
!exist { + return nil + } + d := databases.Get(dbName) + if d == nil { + s, err := newDBSession(&dbc) + if err != nil { + fmt.Println("Database:", dbName, " error:", err) + return nil + } + mgoSessions.Set(dbName, s) + db := s.DB(dbc.Database) + if db == nil { + return nil + } + databases.Set(dbName, db) + return db + } else { + if db, ok := d.(*mgo.Database); ok { + return db + } + } + return nil +} + +func DatabaseWithSubName(dbName, subName string) *mgo.Database { + if atomic.LoadInt32(&resetingSess) == 1 { + return nil + } + var dbc DbConfig + var exist bool + if dbc, exist = Config.Dbs[dbName]; !exist { + return nil + } + fullName := fmt.Sprintf("%s_%s", dbName, subName) + d := databases.Get(fullName) + if d == nil { + s, err := newDBSession(&dbc) + if err != nil { + fmt.Println("Database:", fullName, " error:", err) + return nil + } + mgoSessions.Set(dbName, s) + db := s.DB(dbc.Database + subName) + if db == nil { + return nil + } + databases.Set(fullName, db) + return db + } else { + if db, ok := d.(*mgo.Database); ok { + return db + } + } + return nil +} + +func DatabaseC(dbName, collectionName string) *Collection { + if atomic.LoadInt32(&resetingSess) == 1 { + return nil + } + //一个库共享一个连接池 + db := Database(dbName) + if db != nil { + c := db.C(collectionName) + if c != nil { + return &Collection{Collection: c, valid: true} + } + } + return nil +} + +func DatabaseWithSubNameC(dbName, subName, collectionName string) *Collection { + if atomic.LoadInt32(&resetingSess) == 1 { + return nil + } + //一个库共享一个连接池 + db := DatabaseWithSubName(dbName, subName) + if db != nil { + c := db.C(collectionName) + if c != nil { + return &Collection{Collection: c, valid: true} + } + } + return nil +} + +// 不严格的多线程保护 +func ResetAllSession() { + atomic.StoreInt32(&resetingSess, 1) + defer atomic.StoreInt32(&resetingSess, 0) + tstart := time.Now() + logger.Logger.Warnf("ResetAllSession!!! 
// do dispatches a decoded packet to the handler registered for its
// packet id, timing the call under /action/<name> for profiling and
// recycling the action object afterwards. A panic in the handler is
// caught and logged with a stack dump instead of crashing the worker.
func (this *action) do() {
	watch := profile.TimeStatisticMgr.WatchStart(fmt.Sprintf("/action/%v", this.n), profile.TIME_ELEMENT_ACTION)
	defer func() {
		// Always recycle and stop the timer, even when the handler
		// panics; DumpStackIfPanic swallows and logs the panic.
		FreeAction(this)
		if watch != nil {
			watch.Stop()
		}
		utils.DumpStackIfPanic(fmt.Sprintf("netlib.session.task.do exe error, packet type:%v", reflect.TypeOf(this.p)))
	}()

	h := GetHandler(this.packid)
	if h != nil {
		err := h.Process(this.s, this.packid, this.p)
		if err != nil {
			logger.Logger.Infof("%v process error %v", this.n, err)
		}
	} else {
		logger.Logger.Infof("%v not registe handler", this.n)
	}
}
// startIoService is a command that, when executed on the core object's
// goroutine, creates the io service described by sc, registers it in
// NetModule's pool under the config id, and starts it.
type startIoService struct {
	sc *SessionConfig
}

// Done implements the command; it runs on o's goroutine. A nil result
// from newIoService is silently skipped.
func (sis *startIoService) Done(o *basic.Object) error {

	s := NetModule.newIoService(sis.sc)
	if s != nil {
		NetModule.pool[sis.sc.Id] = s
		s.start()
	}

	return nil
}

// SendStartNetIoService queues a startIoService command onto the core
// object, returning whether the command was accepted for delivery.
func SendStartNetIoService(sc *SessionConfig) bool {
	return core.CoreObject().SendCommand(&startIoService{sc: sc}, false)
}
// Name returns the config-section name for the netlib package.
func (c *Configuration) Name() string {
	return "netlib"
}

// Init prints the configured server banner and initializes every
// declared io service (encoder/decoder lookup, filter and handler
// chain construction, timeout defaults).
func (c *Configuration) Init() error {
	for _, str := range c.SrvInfo.Banner {
		logger.Logger.Info(str)
	}

	for i := 0; i < len(c.IoServices); i++ {
		c.IoServices[i].Init()
	}
	return nil
}

// Close implements the Package interface; nothing to release here.
func (c *Configuration) Close() error {
	return nil
}
time.Second + sc.KeepAliveIdle *= time.Second +} + +func (sc *SessionConfig) GetFilter(name string) SessionFilter { + if sc.sfc != nil { + return sc.sfc.GetFilter(name) + } + return nil +} + +func (sc *SessionConfig) GetHandler(name string) SessionHandler { + if sc.shc != nil { + return sc.shc.GetHandler(name) + } + return nil +} +func init() { + core.RegistePackage(&Config) +} diff --git a/core/netlib/connector.go b/core/netlib/connector.go new file mode 100644 index 0000000..c7b5ee9 --- /dev/null +++ b/core/netlib/connector.go @@ -0,0 +1,12 @@ +package netlib + +import "time" + +const ( + ReconnectInterval time.Duration = 5 * time.Second +) + +type Connector interface { + ioService + GetSessionConfig() *SessionConfig +} diff --git a/core/netlib/connectormgr.go b/core/netlib/connectormgr.go new file mode 100644 index 0000000..4a22b8e --- /dev/null +++ b/core/netlib/connectormgr.go @@ -0,0 +1,46 @@ +package netlib + +import ( + "fmt" + "sync" +) + +var ( + ConnectorMgr = &connectorMgr{ + pool: make(map[string]Connector), + } +) + +type connectorMgr struct { + pool map[string]Connector + lock sync.Mutex +} + +func (cm *connectorMgr) IsConnecting(sc *SessionConfig) bool { + strKey := fmt.Sprintf("%v:%v", sc.Ip, sc.Port) + cm.lock.Lock() + defer cm.lock.Unlock() + if _, exist := cm.pool[strKey]; exist { + return true + } + return false +} + +func (cm *connectorMgr) registeConnector(c Connector) { + sc := c.GetSessionConfig() + strKey := fmt.Sprintf("%v:%v", sc.Ip, sc.Port) + cm.lock.Lock() + defer cm.lock.Unlock() + cm.pool[strKey] = c +} + +func (cm *connectorMgr) unregisteConnector(c Connector) { + cm.lock.Lock() + defer cm.lock.Unlock() + for k, v := range cm.pool { + if v == c { + delete(cm.pool, k) + return + } + } +} diff --git a/core/netlib/decoder.go b/core/netlib/decoder.go new file mode 100644 index 0000000..4e152e6 --- /dev/null +++ b/core/netlib/decoder.go @@ -0,0 +1,88 @@ +package netlib + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + 
"sync/atomic" +) + +var ( + DefaultProtocoDecoderName = "default-protocol-decoder" + protocolDecoders = make(map[string]ProtocolDecoder) + ErrRcvBufCannotGet = errors.New("Session rcvbuf get failed") +) + +type ProtocolDecoder interface { + Decode(s *Session, r io.Reader) (packetid int, logicNo uint32, packet interface{}, err error, raw []byte) + FinishDecode(s *Session) +} + +type DefaultProtocolDecoder struct { +} + +func (pdi *DefaultProtocolDecoder) Decode(s *Session, r io.Reader) (packetid int, logicNo uint32, packet interface{}, err error, raw []byte) { + if s.rcvbuf == nil { + s.rcvbuf = AllocRWBuf() + } + rdbuf := s.rcvbuf + if rdbuf == nil { + err = ErrRcvBufCannotGet + return + } + err = binary.Read(r, binary.LittleEndian, &rdbuf.pheader) + if err != nil { + return + } + + if int(rdbuf.pheader.Len) > MaxPacketSize { + err = fmt.Errorf("PacketHeader len exceed MaxPacket. get %v limit %v", rdbuf.pheader.Len, MaxPacketSize) + return + } + if rdbuf.pheader.Seq != rdbuf.seq+1 { + err = fmt.Errorf("PacketHeader sno not matched. 
get %v want %v", rdbuf.pheader.Seq, rdbuf.seq+1) + return + } + rdbuf.seq++ + logicNo = rdbuf.pheader.LogicNo + _, err = io.ReadFull(r, rdbuf.buf[0:rdbuf.pheader.Len]) + if err != nil { + return + } + raw = rdbuf.buf[0:rdbuf.pheader.Len] + packetid, packet, err = UnmarshalPacket(rdbuf.buf[0:rdbuf.pheader.Len]) + if err != nil { + return + } + + atomic.AddInt64(&s.recvedBytes, int64(int(rdbuf.pheader.Len)+LenOfProtoHeader)) + atomic.AddInt64(&s.recvedPack, 1) + return +} + +func (pdi *DefaultProtocolDecoder) FinishDecode(s *Session) { + if s.rcvbuf != nil { + FreeRWBuf(s.rcvbuf) + s.rcvbuf = nil + } +} + +func RegisteProtocolDecoder(name string, dec ProtocolDecoder) { + if _, exist := protocolDecoders[name]; exist { + panic("repeated registe protocol decoder:" + name) + } + protocolDecoders[name] = dec +} + +func GetProtocolDecoder(name string) ProtocolDecoder { + if dec, exist := protocolDecoders[name]; exist { + return dec + } + + return nil +} + +func init() { + RegisteProtocolDecoder(DefaultProtocoDecoderName, &DefaultProtocolDecoder{}) +} diff --git a/core/netlib/defaultprotocol.go b/core/netlib/defaultprotocol.go new file mode 100644 index 0000000..975c53a --- /dev/null +++ b/core/netlib/defaultprotocol.go @@ -0,0 +1,40 @@ +// protocol +package netlib + +import ( + "encoding/binary" + "fmt" +) + +var ( + LenOfPacketHeader int + LenOfProtoHeader int + MaxPacketSize int = 64 * 1024 +) + +type ProtoHeader struct { + Len uint16 //包长度 + Seq uint16 //包序号 + LogicNo uint32 //逻辑号 +} + +type PacketHeader struct { + EncodeType int16 + PacketId int16 +} + +type RWBuffer struct { + pheader ProtoHeader + seq uint16 + buf []byte +} + +func (rwb *RWBuffer) Init() { + rwb.seq = 0 +} + +func init() { + LenOfPacketHeader = binary.Size(&PacketHeader{}) + LenOfProtoHeader = binary.Size(&ProtoHeader{}) + fmt.Println("sizeof(PacketHeader)=", LenOfPacketHeader, " sizeof(ProtoHeader)=", LenOfProtoHeader) +} diff --git a/core/netlib/enc-binary.go b/core/netlib/enc-binary.go new file 
mode 100644 index 0000000..2f71be4 --- /dev/null +++ b/core/netlib/enc-binary.go @@ -0,0 +1,31 @@ +// binary +package netlib + +import ( + "bytes" + "encoding/binary" +) + +var Bcd = &BinaryEncDecoder{} + +type BinaryEncDecoder struct { +} + +func (this *BinaryEncDecoder) Unmarshal(buf []byte, pack interface{}) error { + return binary.Read(bytes.NewReader(buf), binary.LittleEndian, pack) +} + +func (this *BinaryEncDecoder) Marshal(pack interface{}) ([]byte, error) { + writer := bytes.NewBuffer(nil) + err := binary.Write(writer, binary.LittleEndian, pack) + return writer.Bytes(), err +} + +func init() { + RegisteEncoding(EncodingTypeBinary, Bcd, func(pack interface{}) int { + if _, ok := pack.([]byte); ok { + return EncodingTypeBinary + } + return -1 + }) +} diff --git a/core/netlib/enc-gbp.go b/core/netlib/enc-gbp.go new file mode 100644 index 0000000..a000b75 --- /dev/null +++ b/core/netlib/enc-gbp.go @@ -0,0 +1,45 @@ +// Gbp +package netlib + +import ( + "errors" + + "google.golang.org/protobuf/proto" +) + +var ErrorTypeNotFit = errors.New("packet not proto.Message type") + +var Gpb = &GbpEncDecoder{} + +type GbpEncDecoder struct { +} + +func (this *GbpEncDecoder) Unmarshal(buf []byte, pack interface{}) error { + if protomsg, ok := pack.(proto.Message); ok { + err := proto.Unmarshal(buf, protomsg) + if err != nil { + return err + } else { + return nil + } + } + + return ErrorTypeNotFit +} + +func (this *GbpEncDecoder) Marshal(pack interface{}) ([]byte, error) { + if protomsg, ok := pack.(proto.Message); ok { + return proto.Marshal(protomsg) + } + + return nil, ErrorTypeNotFit +} + +func init() { + RegisteEncoding(EncodingTypeGPB, Gpb, func(pack interface{}) int { + if _, ok := pack.(proto.Message); ok { + return EncodingTypeGPB + } + return -1 + }) +} diff --git a/core/netlib/enc-gob.go b/core/netlib/enc-gob.go new file mode 100644 index 0000000..d3a5e86 --- /dev/null +++ b/core/netlib/enc-gob.go @@ -0,0 +1,43 @@ +// Gob +package netlib + +import ( + "bytes" + 
"encoding/gob" +) + +var Gob = &GobEncDecoder{} + +type GobEncDecoder struct { +} + +func (this *GobEncDecoder) Unmarshal(buf []byte, pack interface{}) error { + network := bytes.NewBuffer(buf) + // Create a decoder and receive a value. + dec := gob.NewDecoder(network) + err := dec.Decode(pack) + if err != nil { + return err + } + + return nil +} + +func (this *GobEncDecoder) Marshal(pack interface{}) ([]byte, error) { + var network bytes.Buffer // Stand-in for the network. + + // Create an encoder and send a value. + enc := gob.NewEncoder(&network) + err := enc.Encode(pack) + if err != nil { + return nil, err + } + + return network.Bytes(), nil +} + +func init() { + RegisteEncoding(EncodingTypeGob, Gob, func(pack interface{}) int { + return EncodingTypeGob + }) +} diff --git a/core/netlib/enc-nill.go b/core/netlib/enc-nill.go new file mode 100644 index 0000000..ba81609 --- /dev/null +++ b/core/netlib/enc-nill.go @@ -0,0 +1,23 @@ +// nil +package netlib + +var Nil = &NilEncDecoder{} + +type NilEncDecoder struct { +} + +func (this *NilEncDecoder) Unmarshal(buf []byte, pack interface{}) error { + return nil +} + +func (this *NilEncDecoder) Marshal(pack interface{}) ([]byte, error) { + if binarymsg, ok := pack.([]byte); ok { + return binarymsg, nil + } + + return nil, ErrorTypeNotFit +} + +func init() { + RegisteEncoding(EncodingTypeNil, Nil, nil) +} diff --git a/core/netlib/encoder.go b/core/netlib/encoder.go new file mode 100644 index 0000000..aa17a81 --- /dev/null +++ b/core/netlib/encoder.go @@ -0,0 +1,148 @@ +package netlib + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "sync" + "sync/atomic" +) + +var ( + DefaultProtocolEncoderName = "default-protocol-encoder" + DefaultBuiltinProtocolEncoder = &DefaultProtocolEncoder{} + protocolEncoders = make(map[string]ProtocolEncoder) + ErrSndBufCannotGet = errors.New("Session sndbuf get failed") + ErrExceedMaxPacketSize = errors.New("exceed max packet size") +) + +type PacketCutSlicesFunc func(data []byte) 
(int, []interface{}) + +var bytesBufferPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 0, 512) + }, +} + +type ProtocolEncoder interface { + Encode(s *Session, packetid int, logicNo uint32, packet interface{}, w io.Writer) (data []byte, err error) + FinishEncode(s *Session) +} + +type DefaultProtocolEncoder struct { + PacketCutor PacketCutSlicesFunc +} + +func (dec *DefaultProtocolEncoder) Encode(s *Session, packetid int, logicNo uint32, packet interface{}, w io.Writer) (data []byte, err error) { + if s.sndbuf == nil { + s.sndbuf = AllocRWBuf() + } + sndbuf := s.sndbuf + if sndbuf == nil { + err = ErrSndBufCannotGet + return + } + + var ( + ok bool + ) + if data, ok = packet.([]byte); !ok { + data, err = MarshalPacket(packetid, packet) + if err != nil { + return + } + } + + var size int = len(data) + if size > MaxPacketSize-LenOfProtoHeader { + if s.sc.SupportFragment { + err = dec.CutAndSendPacket(s, logicNo, data, w) + return + } else { + err = ErrExceedMaxPacketSize + return + } + } + + //fill packerHeader + sndbuf.seq++ + sndbuf.pheader.Len = uint16(size) + sndbuf.pheader.Seq = sndbuf.seq + sndbuf.pheader.LogicNo = logicNo + + buf := bytesBufferPool.Get().([]byte) + defer func() { + bytesBufferPool.Put(buf[:0]) + }() + ioBuf := bytes.NewBuffer(buf) + + //err = binary.Write(w, binary.LittleEndian, &sndbuf.pheader) + err = binary.Write(ioBuf, binary.LittleEndian, sndbuf.pheader.Len) + err = binary.Write(ioBuf, binary.LittleEndian, sndbuf.pheader.Seq) + err = binary.Write(ioBuf, binary.LittleEndian, sndbuf.pheader.LogicNo) + if err != nil { + return + } + + lenPack := len(data) + _, err = ioBuf.Write(data[:]) + if err != nil { + return + } + _, err = w.Write(ioBuf.Bytes()) + //_, err = w.Write(data[:]) + //_, err = io.Copy(w, bytes.NewBuffer(data)) + if err != nil { + return + } + + atomic.AddInt64(&s.sendedBytes, int64(lenPack+LenOfProtoHeader)) + atomic.AddInt64(&s.sendedPack, 1) + return +} + +func (dec *DefaultProtocolEncoder) 
CutAndSendPacket(s *Session, logicNo uint32, data []byte, w io.Writer) (err error) { + if dec.PacketCutor != nil { + packid, slices := dec.PacketCutor(data) + for i := 0; i < len(slices); i++ { + _, err = dec.Encode(s, packid, logicNo, slices[i], w) + if err != nil { + return + } + if s.scpl != nil { + err = s.scpl.onCutPacket(w) + if err != nil { + return + } + } + } + } + return +} + +func (dec *DefaultProtocolEncoder) FinishEncode(s *Session) { + if s.sndbuf != nil { + FreeRWBuf(s.sndbuf) + s.sndbuf = nil + } +} + +func RegisteProtocolEncoder(name string, enc ProtocolEncoder) { + if _, exist := protocolEncoders[name]; exist { + panic("repeated registe protocol encoder:" + name) + } + protocolEncoders[name] = enc +} + +func GetProtocolEncoder(name string) ProtocolEncoder { + if enc, exist := protocolEncoders[name]; exist { + return enc + } + + return nil +} + +func init() { + RegisteProtocolEncoder(DefaultProtocolEncoderName, DefaultBuiltinProtocolEncoder) +} diff --git a/core/netlib/encoder_test.go b/core/netlib/encoder_test.go new file mode 100644 index 0000000..27e6ec7 --- /dev/null +++ b/core/netlib/encoder_test.go @@ -0,0 +1,70 @@ +package netlib + +import ( + "testing" + "time" + + "google.golang.org/protobuf/proto" + "mongo.games.com/goserver/core/builtin/protocol" +) + +//func BenchmarkMarshalPacket(b *testing.B) { +// runtime.GOMAXPROCS(1) + +// c, err := net.Dial("tcp", "192.168.1.106:9999") +// if err != nil { +// log.Fatal(err) +// } +// if tcpconn, ok := c.(*net.TCPConn); ok { +// tcpconn.SetLinger(5) +// tcpconn.SetNoDelay(false) +// tcpconn.SetKeepAlive(false) +// tcpconn.SetReadBuffer(102400) +// tcpconn.SetWriteBuffer(10240000) +// } +// sc := &SessionConfig{} +// s := newTcpSession(1, c, sc, nil) + +// pck := &protocol.SSPacketAuth{AuthKey: proto.String("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), Timestamp: proto.Int64(time.Now().Unix())} +// proto.SetDefaults(pck) +// tNow := 
time.Now() +// b.StartTimer() + +// w := bytes.NewBuffer(nil) + +// for i := 0; i < b.N; i++ { +// //for j := 0; j < 100; j++ { +// // b, err := MarshalPacket(pck) +// // if err == nil { +// // binary.Write(w, binary.LittleEndian, b) +// // } +// //} +//// pck2 := &protocol.SSPacketAuth{AuthKey: proto.String("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" /*hex.EncodeToString(w.Bytes())*/), Timestamp: proto.Int64(time.Now().Unix())} +//// DefaultBuiltinProtocolEncoder.Encode(s, pck2, s.conn) +// w.Reset() +// //Gpb.Marshal(pck) +// } + +// b.StopTimer() +// fmt.Println("==========", time.Now().Sub(tNow), " ==", b.N) +//} + +func BenchmarkTypetest(b *testing.B) { + pck := &protocol.SSPacketAuth{AuthKey: proto.String("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), Timestamp: proto.Int64(time.Now().Unix())} + proto.SetDefaults(pck) + b.StartTimer() + for i := 0; i < b.N; i++ { + typetest(pck) + } + b.StopTimer() +} + +func BenchmarkGetPacketId(b *testing.B) { + pck := &protocol.SSPacketAuth{AuthKey: proto.String("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), Timestamp: proto.Int64(time.Now().Unix())} + proto.SetDefaults(pck) + b.StartTimer() + for i := 0; i < b.N; i++ { + getPacketId(pck) + } + b.StopTimer() +} diff --git a/core/netlib/encoding.go b/core/netlib/encoding.go new file mode 100644 index 0000000..00788ad --- /dev/null +++ b/core/netlib/encoding.go @@ -0,0 +1,163 @@ +// encoding +package netlib + +import ( + "bytes" + "encoding/binary" + "fmt" + + "google.golang.org/protobuf/proto" +) + +const ( + EncodingTypeNil = iota + EncodingTypeGPB + EncodingTypeBinary + EncodingTypeGob + EncodingTypeMax +) + +var ( + encodingArray [EncodingTypeMax]EncDecoder + typeTesters [EncodingTypeMax]TypeTester +) + +type EncDecoder interface { + Unmarshal(buf []byte, pack interface{}) error + 
Marshal(pack interface{}) ([]byte, error) +} + +type UnparsePacketTypeErr struct { + EncodeType int16 + PacketId int16 + Err error +} + +type TypeTester func(pack interface{}) int + +func (this *UnparsePacketTypeErr) Error() string { + return fmt.Sprintf("cannot parse proto type:%v packetid:%v err:%v", this.EncodeType, this.PacketId, this.Err) +} + +func NewUnparsePacketTypeErr(et, packid int16, err error) *UnparsePacketTypeErr { + return &UnparsePacketTypeErr{EncodeType: et, PacketId: packid, Err: err} +} + +func UnmarshalPacket(data []byte) (int, interface{}, error) { + var ph PacketHeader + err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &ph) + if err != nil { + return int(ph.PacketId), nil, err + } + + if ph.EncodeType >= EncodingTypeMax { + return int(ph.PacketId), nil, NewUnparsePacketTypeErr(ph.EncodeType, ph.PacketId, fmt.Errorf("EncodeType:%d unregiste", ph.EncodeType)) + } + + pck := CreatePacket(int(ph.PacketId)) + if pck == nil { + return int(ph.PacketId), nil, NewUnparsePacketTypeErr(ph.EncodeType, ph.PacketId, fmt.Errorf("packetId:%d unregiste", ph.PacketId)) + } else { + err = encodingArray[ph.EncodeType].Unmarshal(data[LenOfPacketHeader:], pck) + return int(ph.PacketId), pck, err + } + + return 0, nil, nil +} + +func MarshalPacket(packetid int, pack interface{}) ([]byte, error) { + et := typetest(pack) + if et < EncodingTypeNil || et > EncodingTypeMax { + return nil, fmt.Errorf("MarshalPacket unkown data type:%v", et) + } + + if encodingArray[et] == nil { + return nil, fmt.Errorf("MarshalPacket unkown data type:%v", et) + } + + data, err := encodingArray[et].Marshal(pack) + if err != nil { + return nil, fmt.Errorf("%v %v", pack, err.Error()) + } + + ph := PacketHeader{ + EncodeType: int16(et), + PacketId: int16(packetid), + } + + w := bytes.NewBuffer(nil) + binary.Write(w, binary.LittleEndian, &ph) + binary.Write(w, binary.LittleEndian, data) + return w.Bytes(), nil +} + +func MarshalPacketNoPackId(pack interface{}) (data []byte, err 
error) { + et := typetest(pack) + if et < EncodingTypeNil || et > EncodingTypeMax { + return nil, fmt.Errorf("MarshalPacket unkown data type:%v", et) + } + + if encodingArray[et] == nil { + return nil, fmt.Errorf("MarshalPacket unkown data type:%v", et) + } + + data, err = encodingArray[et].Marshal(pack) + if err != nil { + return nil, fmt.Errorf("%v %v", pack, err.Error()) + } + + ph := PacketHeader{ + EncodeType: int16(et), + PacketId: int16(0), + } + + w := bytes.NewBuffer(nil) + binary.Write(w, binary.LittleEndian, &ph) + binary.Write(w, binary.LittleEndian, data) + return w.Bytes(), nil +} + +func UnmarshalPacketNoPackId(data []byte, pck interface{}) error { + var ph PacketHeader + err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &ph) + if err != nil { + return err + } + + if ph.EncodeType >= EncodingTypeMax { + return NewUnparsePacketTypeErr(ph.EncodeType, ph.PacketId, fmt.Errorf("EncodeType:%d unregiste", ph.EncodeType)) + } + + err = encodingArray[ph.EncodeType].Unmarshal(data[LenOfPacketHeader:], pck) + if err != nil { + return NewUnparsePacketTypeErr(ph.EncodeType, ph.PacketId, err) + } + return err +} + +func SkipHeaderGetRaw(data []byte) []byte { + if len(data) < LenOfPacketHeader { + return nil + } + return data[LenOfPacketHeader:] +} + +func typetest(pack interface{}) int { + switch pack.(type) { + case proto.Message: + return EncodingTypeGPB + case []byte: + return EncodingTypeBinary + default: + return EncodingTypeGob + } + return -1 +} + +func RegisteEncoding(edtype int, ed EncDecoder, tt TypeTester) { + if encodingArray[edtype] != nil { + panic(fmt.Sprintf("repeated registe EncDecoder %d", edtype)) + } + encodingArray[edtype] = ed + typeTesters[edtype] = tt +} diff --git a/core/netlib/error.go b/core/netlib/error.go new file mode 100644 index 0000000..6e1d818 --- /dev/null +++ b/core/netlib/error.go @@ -0,0 +1,15 @@ +// error +package netlib + +type NetLibParamError struct { + Src string + Param string +} + +func (self 
*NetLibParamError) Error() string { + return "Invalid Parameter: " + self.Src + self.Param +} + +func newNetLibParamError(src, param string) *NetLibParamError { + return &NetLibParamError{Src: src, Param: param} +} diff --git a/core/netlib/ioservice.go b/core/netlib/ioservice.go new file mode 100644 index 0000000..2d8f842 --- /dev/null +++ b/core/netlib/ioservice.go @@ -0,0 +1,33 @@ +package netlib + +type SessionStats struct { + Id int + GroupId int + RunningTime int64 + SendedBytes int64 + RecvedBytes int64 + SendedPack int64 + RecvedPack int64 + PendSendPack int + PendRecvPack int + RemoteAddr string +} + +type ServiceStats struct { + Id int + Type int + Name string + Addr string + MaxActive int + MaxDone int + RunningTime int64 + SessionStats []SessionStats +} + +type ioService interface { + start() error + update() + shutdown() + dump() + stats() ServiceStats +} diff --git a/core/netlib/netengine.go b/core/netlib/netengine.go new file mode 100644 index 0000000..3adbbd4 --- /dev/null +++ b/core/netlib/netengine.go @@ -0,0 +1,240 @@ +package netlib + +import ( + "errors" + "math" + "time" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/module" + "mongo.games.com/goserver/core/utils" +) + +const ( + IoServiceMaxCount int = 10 +) + +var ( + NetModule = newNetEngine() +) + +type NetEngine struct { + pool map[int]ioService + childAck chan int + backlogSc chan *SessionConfig + quit bool + reaped bool +} + +func newNetEngine() *NetEngine { + e := &NetEngine{ + pool: make(map[int]ioService), + childAck: make(chan int, IoServiceMaxCount), + backlogSc: make(chan *SessionConfig, IoServiceMaxCount), + } + + return e +} + +func (e *NetEngine) newIoService(sc *SessionConfig) ioService { + var s ioService + if sc.IsClient { + if !sc.AllowMultiConn && ConnectorMgr.IsConnecting(sc) { + return nil + } + switch sc.Protocol { + case "ws", "wss": + s = newWsConnector(e, sc) + case "udp": + s = newUdpConnector(e, sc) + default: + s = newTcpConnector(e, 
sc) + } + } else { + switch sc.Protocol { + case "ws", "wss": + s = newWsAcceptor(e, sc) + case "udp": + s = newUdpAcceptor(e, sc) + default: + s = newTcpAcceptor(e, sc) + } + } + return s +} + +func (e *NetEngine) GetAcceptors() []Acceptor { + acceptors := make([]Acceptor, 0, len(e.pool)) + for _, v := range e.pool { + if a, is := v.(Acceptor); is { + acceptors = append(acceptors, a) + } + } + + return acceptors +} + +func (e *NetEngine) Connect(sc *SessionConfig) error { + if e.quit { + return errors.New("NetEngine already quiting") + } + SendStartNetIoService(sc) + return nil +} + +func (e *NetEngine) Listen(sc *SessionConfig) error { + if e.quit { + return errors.New("NetEngine already quiting") + } + SendStartNetIoService(sc) + return nil +} + +func (e *NetEngine) ShutConnector(ip string, port int) { + for _, v := range e.pool { + if c, is := v.(Connector); is { + sc := c.GetSessionConfig() + if sc.Ip == ip && sc.Port == port { + c.shutdown() + return + } + } + } +} + +// ////////////////////////////////////////////////////////////////// +// / Module Implement [beg] +// ////////////////////////////////////////////////////////////////// +func (e *NetEngine) ModuleName() string { + return module.ModuleName_Net +} + +func (e *NetEngine) Init() { + var err error + for i := 0; i < len(Config.IoServices); i++ { + s := e.newIoService(&Config.IoServices[i]) + if s != nil { + e.pool[Config.IoServices[i].Id] = s + err = s.start() + if err != nil { + logger.Logger.Error(err) + } + } + } + + //time.AfterFunc(time.Minute*5, func() { e.dump() }) +} + +func (e *NetEngine) Update() { + defer utils.DumpStackIfPanic("NetEngine.Update") + + e.clearClosedIo() + + for _, v := range e.pool { + v.update() + } +} + +func (e *NetEngine) Shutdown() { + if e.quit { + return + } + + e.quit = true + + if len(e.pool) > 0 { + for _, v := range e.pool { + v.shutdown() + } + go e.reapRoutine() + } else { + e.destroy() + } +} + 
+//////////////////////////////////////////////////////////////////// +/// Module Implement [end] +//////////////////////////////////////////////////////////////////// + +func (e *NetEngine) clearClosedIo() { + for { + select { + case k := <-e.childAck: + delete(e.pool, k) + case sc := <-e.backlogSc: + s := e.newIoService(sc) + if s != nil { + e.pool[sc.Id] = s + err := s.start() + if err != nil { + logger.Logger.Error(err) + } + } + default: + return + } + } +} + +func (e *NetEngine) reapRoutine() { + if e.reaped { + return + } + + e.reaped = true + + for { + select { + case k := <-e.childAck: + delete(e.pool, k) + if len(e.pool) == 0 { + e.destroy() + return + } + } + } +} + +func (e *NetEngine) destroy() { + module.UnregisteModule(e) +} + +func (e *NetEngine) dump() { + for _, v := range e.pool { + v.dump() + } + time.AfterFunc(time.Minute*5, func() { e.dump() }) +} + +func (e *NetEngine) stats() map[int]ServiceStats { + stats := make(map[int]ServiceStats) + for k, v := range e.pool { + s := v.stats() + stats[k] = s + } + return stats +} + +func init() { + module.RegisteModule(NetModule, 0, math.MaxInt32) +} + +func Connect(sc *SessionConfig) error { + return NetModule.Connect(sc) +} + +func Listen(sc *SessionConfig) error { + return NetModule.Listen(sc) +} + +func GetAcceptors() []Acceptor { + return NetModule.GetAcceptors() +} + +func ShutConnector(ip string, port int) { + NetModule.ShutConnector(ip, port) +} + +func Stats() map[int]ServiceStats { + return NetModule.stats() +} diff --git a/core/netlib/packetfactory.go b/core/netlib/packetfactory.go new file mode 100644 index 0000000..f8be835 --- /dev/null +++ b/core/netlib/packetfactory.go @@ -0,0 +1,52 @@ +package netlib + +import ( + "fmt" + "reflect" +) + +var factories = make(map[int]PacketFactory) +var packetQuickMap = make(map[reflect.Type]packetInfo) + +type packetInfo struct { + ptype int + pid int +} + +type PacketFactory interface { + CreatePacket() interface{} +} + +type PacketFactoryWrapper func() 
interface{} + +func (pfw PacketFactoryWrapper) CreatePacket() interface{} { + return pfw() +} + +func RegisterFactory(packetId int, factory PacketFactory) { + if _, ok := factories[packetId]; ok { + panic(fmt.Sprintf("repeate register packet factory: %v", packetId)) + } + + factories[packetId] = factory + tp := factory.CreatePacket() + if tp != nil { + pt := typetest(tp) + packetQuickMap[reflect.TypeOf(tp)] = packetInfo{ptype: pt, pid: packetId} + } +} + +func CreatePacket(packetId int) interface{} { + if v, ok := factories[packetId]; ok { + return v.CreatePacket() + } + return nil +} + +func GetPacketTypeAndId(pack interface{}) (int, int) { + t := reflect.TypeOf(pack) + if tp, exist := packetQuickMap[t]; exist { + return tp.ptype, tp.pid + } + return 0, 0 +} diff --git a/core/netlib/packethandler.go b/core/netlib/packethandler.go new file mode 100644 index 0000000..4ca1c9f --- /dev/null +++ b/core/netlib/packethandler.go @@ -0,0 +1,55 @@ +// handler +package netlib + +import ( + "fmt" + "reflect" +) + +var handlers = make(map[int]Handler) + +type Handler interface { + Process(session *Session, packetid int, data interface{}) error +} + +type HandlerWrapper func(session *Session, packetid int, data interface{}) error + +func (hw HandlerWrapper) Process(session *Session, packetid int, data interface{}) error { + return hw(session, packetid, data) +} + +func RegisterHandler(packetId int, h Handler) { + if _, ok := handlers[packetId]; ok { + panic(fmt.Sprintf("repeate register handler: %v Handler type=%v", packetId, reflect.TypeOf(h))) + } + + handlers[packetId] = h +} + +func Register1ToMHandler(h Handler, packetIds ...int) { + for _, packetId := range packetIds { + RegisterHandler(packetId, h) + } +} + +func RegisterRangeHandler(start, end int, h Handler) { + for ; start <= end; start++ { + RegisterHandler(start, h) + } +} + +func GetHandler(packetId int) Handler { + if h, ok := handlers[packetId]; ok { + return h + } + + return nil +} + +func Register(mainId int, 
msgType interface{}, h func(session *Session, packetId int, data interface{}) error) { + f := func() interface{} { + return reflect.New(reflect.TypeOf(msgType)).Interface() + } + RegisterFactory(mainId, PacketFactoryWrapper(f)) + RegisterHandler(mainId, HandlerWrapper(h)) +} diff --git a/core/netlib/packetpool.go b/core/netlib/packetpool.go new file mode 100644 index 0000000..5c67c0a --- /dev/null +++ b/core/netlib/packetpool.go @@ -0,0 +1,71 @@ +package netlib + +import "sync" + +var pp = NewPacketPool(10240) + +func AllocPacket() *packet { + return pp.Get() +} + +func FreePacket(p *packet) { + pp.Give(p) +} + +type PacketPool struct { + free *packet + lock *sync.Mutex + num int + allocNum int + remainNum int +} + +func NewPacketPool(num int) *PacketPool { + pp := &PacketPool{ + lock: new(sync.Mutex), + num: num, + } + return pp +} + +func (pp *PacketPool) grow() { + var ( + i int + p *packet + ps = make([]packet, pp.num) + ) + pp.free = &(ps[0]) + p = pp.free + for i = 1; i < pp.num; i++ { + p.next = &(ps[i]) + p = p.next + } + p.next = nil + pp.allocNum += pp.num + pp.remainNum += pp.num + return +} + +func (pp *PacketPool) Get() (p *packet) { + pp.lock.Lock() + if p = pp.free; p == nil { + pp.grow() + p = pp.free + } + pp.free = p.next + p.next = nil + pp.remainNum-- + pp.lock.Unlock() + return +} + +func (pp *PacketPool) Give(p *packet) { + if p.next != nil { + return + } + pp.lock.Lock() + p.next = pp.free + pp.free = p + pp.remainNum++ + pp.lock.Unlock() +} diff --git a/core/netlib/recycler_action.go b/core/netlib/recycler_action.go new file mode 100644 index 0000000..224fbfa --- /dev/null +++ b/core/netlib/recycler_action.go @@ -0,0 +1,68 @@ +package netlib + +import "sync" + +var ap = NewActionPool(1024) + +func AllocAction() *action { + return ap.Get() +} + +func FreeAction(a *action) { + ap.Give(a) +} + +type ActionPool struct { + free *action + lock *sync.Mutex + num int + allocNum int + remainNum int +} + +func NewActionPool(num int) *ActionPool { + ap 
:= &ActionPool{ + lock: new(sync.Mutex), + num: num, + } + return ap +} + +func (ap *ActionPool) grow() { + var ( + i int + a *action + as = make([]action, ap.num) + ) + ap.free = &(as[0]) + a = ap.free + for i = 1; i < ap.num; i++ { + a.next = &(as[i]) + a = a.next + } + a.next = nil + ap.allocNum += ap.num + ap.remainNum += ap.num + return +} + +func (ap *ActionPool) Get() (a *action) { + ap.lock.Lock() + if a = ap.free; a == nil { + ap.grow() + a = ap.free + } + ap.free = a.next + a.next = nil + ap.remainNum-- + ap.lock.Unlock() + return +} + +func (ap *ActionPool) Give(a *action) { + ap.lock.Lock() + a.next = ap.free + ap.free = a + ap.remainNum++ + ap.lock.Unlock() +} diff --git a/core/netlib/recycler_rwbuf.go b/core/netlib/recycler_rwbuf.go new file mode 100644 index 0000000..a9cb019 --- /dev/null +++ b/core/netlib/recycler_rwbuf.go @@ -0,0 +1,32 @@ +package netlib + +import ( + "mongo.games.com/goserver/core/container/recycler" +) + +const ( + RWBufRecyclerBacklog int = 128 +) + +var RWRecycler = recycler.NewRecycler( + RWBufRecyclerBacklog, + func() interface{} { + rb := &RWBuffer{ + buf: make([]byte, 0, MaxPacketSize), + } + + return rb + }, + "rwbuf_recycler", +) + +func AllocRWBuf() *RWBuffer { + b := RWRecycler.Get() + rb := b.(*RWBuffer) + rb.Init() + return rb +} + +func FreeRWBuf(buf *RWBuffer) { + RWRecycler.Give(buf) +} diff --git a/core/netlib/rpc_client.go b/core/netlib/rpc_client.go new file mode 100644 index 0000000..c301b57 --- /dev/null +++ b/core/netlib/rpc_client.go @@ -0,0 +1,149 @@ +package netlib + +import ( + "errors" + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/logger" +) + +// ServerError represents an error that has been returned from +// the remote side of the RPC connection. 
+type ServerError string + +func (e ServerError) Error() string { + return string(e) +} + +var ErrShutdown = errors.New("connection is shut down") +var ErrSendBufFull = errors.New("sendbuf is full") +var ErrUnsupportRpc = errors.New("only inner session support rpc") + +// If set, print log statements for internal and I/O errors. +var debugRPCLog = false + +// Call represents an active RPC. +type Call struct { + ServiceMethod string // The name of the service and method to call. + Args interface{} // The argument to the function (*struct). + Reply interface{} // The reply from the function (*struct). + Error error // After completion, the error status. + Done chan *Call // Receives *Call when Go is complete. +} + +func (call *Call) done() { + select { + case call.Done <- call: + // ok + default: + // We don't want to block here. It is the caller's responsibility to make + // sure the channel has enough buffer space. See comment in Go(). + if debugRPCLog { + logger.Logger.Debugf("rpc: discarding Call reply due to insufficient Done chan capacity") + } + } +} + +// Go invokes the function asynchronously. It returns the Call structure representing +// the invocation. The done channel will signal when the call is complete by returning +// the same Call object. If done is nil, Go will allocate a new channel. +// If non-nil, done must be buffered or Go will deliberately crash. +func (s *Session) GoRpc(serviceMethod string, args interface{}, reply interface{}, done chan *Call) *Call { + call := new(Call) + call.ServiceMethod = serviceMethod + call.Args = args + call.Reply = reply + if !s.sc.IsInnerLink { + call.Error = ErrUnsupportRpc + return call + } + if done == nil { + done = make(chan *Call, 1) // buffered. + } else { + // If caller passes done != nil, it must arrange that + // done has enough buffer for the number of simultaneous + // RPCs that will be using that channel. If the channel + // is totally unbuffered, it's best not to run at all. 
+ if cap(done) == 0 { + logger.Logger.Criticalf("rpc: done channel is unbuffered") + } + } + call.Done = done + s.sendRpcReq(call) + return call +} + +// Call invokes the named function, waits for it to complete, and returns its error status. +func (s *Session) CallRpc(serviceMethod string, args interface{}, reply interface{}) error { + call := <-s.GoRpc(serviceMethod, args, reply, make(chan *Call, 1)).Done + return call.Error +} + +func (s *Session) sendRpcReq(call *Call) { + // Register this call. + s.mutex.Lock() + if !s.isConned || s.quit || s.shutSend || s.shutRecv { + s.mutex.Unlock() + call.Error = ErrShutdown + call.done() + return + } + seq := s.seq + s.seq++ + s.pending[seq] = call + s.mutex.Unlock() + + // Encode and send the request. + req := &protocol.RpcRequest{ + ServiceMethod: call.ServiceMethod, + Seq: seq, + } + req.Args, _ = Gob.Marshal(call.Args) + + if !s.Send(int(protocol.CoreBuiltinPacketID_PACKET_SS_RPC_REQ), req, true) { + s.mutex.Lock() + call = s.pending[seq] + delete(s.pending, seq) + s.mutex.Unlock() + if call != nil { + call.Error = ErrSendBufFull + call.done() + } + } +} + +func (s *Session) onRpcResp(resp *protocol.RpcResponse) { + s.mutex.Lock() + call := s.pending[resp.Seq] + delete(s.pending, resp.Seq) + s.mutex.Unlock() + + switch { + case call == nil: + // We've got no pending call. That usually means that + // WriteRequest partially failed, and call was already + // removed; response is a server telling us about an + // error reading request body. We should still attempt + // to read error body, but there's no one to give it to. + case resp.Error != "": + // We've got an error response. Give this to the request; + // any subsequent requests will get the ReadResponseBody + // error if there is one. 
+ call.Error = ServerError(resp.Error) + call.done() + default: + call.Error = Gob.Unmarshal(resp.Reply, call.Reply) + call.done() + } +} + +func init() { + RegisterHandler(int(protocol.CoreBuiltinPacketID_PACKET_SS_RPC_RESP), HandlerWrapper(func(s *Session, packetid int, data interface{}) error { + if resp, ok := data.(*protocol.RpcResponse); ok { + s.onRpcResp(resp) + } + return nil + })) + RegisterFactory(int(protocol.CoreBuiltinPacketID_PACKET_SS_RPC_RESP), PacketFactoryWrapper(func() interface{} { + return &protocol.RpcResponse{} + })) +} diff --git a/core/netlib/rpc_server.go b/core/netlib/rpc_server.go new file mode 100644 index 0000000..216f5e1 --- /dev/null +++ b/core/netlib/rpc_server.go @@ -0,0 +1,301 @@ +package netlib + +import ( + "errors" + "go/token" + "io" + "reflect" + "strings" + "sync" + + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/logger" +) + +// Precompute the reflect type for error. Can't use error directly +// because Typeof takes an empty interface value. This is annoying. +var typeOfError = reflect.TypeOf((*error)(nil)).Elem() + +var serviceMap sync.Map // map[string]*service + +type methodType struct { + sync.Mutex // protects counters + method reflect.Method + ArgType reflect.Type + ReplyType reflect.Type + numCalls uint +} + +type service struct { + name string // name of service + rcvr reflect.Value // receiver of methods for the service + typ reflect.Type // type of the receiver + method map[string]*methodType // registered methods +} + +// Is this type exported or a builtin? +func isExportedOrBuiltinType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. 
+ return token.IsExported(t.Name()) || t.PkgPath() == "" +} + +// Register publishes in the server the set of methods of the +// receiver value that satisfy the following conditions: +// - exported method of exported type +// - two arguments, both of exported type +// - the second argument is a pointer +// - one return value, of type error +// +// It returns an error if the receiver is not an exported type or has +// no suitable methods. It also logs the error using package log. +// The client accesses each method using a string of the form "Type.Method", +// where Type is the receiver's concrete type. +func RegisterRpc(rcvr interface{}) error { + return registerRpc(rcvr, "", false) +} + +// RegisterName is like Register but uses the provided name for the type +// instead of the receiver's concrete type. +func RegisterRpcName(name string, rcvr interface{}) error { + return registerRpc(rcvr, name, true) +} + +func registerRpc(rcvr interface{}, name string, useName bool) error { + s := new(service) + s.typ = reflect.TypeOf(rcvr) + s.rcvr = reflect.ValueOf(rcvr) + sname := reflect.Indirect(s.rcvr).Type().Name() + if useName { + sname = name + } + if sname == "" { + s := "rpc.Register: no service name for type " + s.typ.String() + logger.Logger.Debug(s) + return errors.New(s) + } + if !token.IsExported(sname) && !useName { + s := "rpc.Register: type " + sname + " is not exported" + logger.Logger.Debug(s) + return errors.New(s) + } + s.name = sname + + // Install the methods + s.method = suitableMethods(s.typ, true) + + if len(s.method) == 0 { + str := "" + + // To help the user, see if a pointer receiver would work. 
+ method := suitableMethods(reflect.PtrTo(s.typ), false) + if len(method) != 0 { + str = "rpc.Register: type " + sname + " has no exported methods of suitable type (hint: pass a pointer to value of that type)" + } else { + str = "rpc.Register: type " + sname + " has no exported methods of suitable type" + } + logger.Logger.Debug(str) + return errors.New(str) + } + + if _, dup := serviceMap.LoadOrStore(sname, s); dup { + return errors.New("rpc: service already defined: " + sname) + } + return nil +} + +// suitableMethods returns suitable Rpc methods of typ, it will report +// error using log if reportErr is true. +func suitableMethods(typ reflect.Type, reportErr bool) map[string]*methodType { + methods := make(map[string]*methodType) + for m := 0; m < typ.NumMethod(); m++ { + method := typ.Method(m) + mtype := method.Type + mname := method.Name + // Method must be exported. + if method.PkgPath != "" { + continue + } + // Method needs three ins: receiver, *args, *reply. + if mtype.NumIn() != 3 { + if reportErr { + logger.Logger.Debugf("rpc.Register: method %q has %d input parameters; needs exactly three\n", mname, mtype.NumIn()) + } + continue + } + // First arg need not be a pointer. + argType := mtype.In(1) + if !isExportedOrBuiltinType(argType) { + if reportErr { + logger.Logger.Debugf("rpc.Register: argument type of method %q is not exported: %q\n", mname, argType) + } + continue + } + // Second arg must be a pointer. + replyType := mtype.In(2) + if replyType.Kind() != reflect.Ptr { + if reportErr { + logger.Logger.Debugf("rpc.Register: reply type of method %q is not a pointer: %q\n", mname, replyType) + } + continue + } + // Reply type must be exported. + if !isExportedOrBuiltinType(replyType) { + if reportErr { + logger.Logger.Debugf("rpc.Register: reply type of method %q is not exported: %q\n", mname, replyType) + } + continue + } + // Method needs one out. 
+ if mtype.NumOut() != 1 { + if reportErr { + logger.Logger.Debugf("rpc.Register: method %q has %d output parameters; needs exactly one\n", mname, mtype.NumOut()) + } + continue + } + // The return type of the method must be error. + if returnType := mtype.Out(0); returnType != typeOfError { + if reportErr { + logger.Logger.Debugf("rpc.Register: return type of method %q is %q, must be error\n", mname, returnType) + } + continue + } + methods[mname] = &methodType{method: method, ArgType: argType, ReplyType: replyType} + } + return methods +} + +func (m *methodType) NumCalls() (n uint) { + m.Lock() + n = m.numCalls + m.Unlock() + return n +} + +func (s *service) call(sess *Session, mtype *methodType, req *protocol.RpcRequest, argv, replyv reflect.Value) { + mtype.Lock() + mtype.numCalls++ + mtype.Unlock() + function := mtype.method.Func + // Invoke the method, providing a new value for the reply. + returnValues := function.Call([]reflect.Value{s.rcvr, argv, replyv}) + // The return value for the method is an error. + errInter := returnValues[0].Interface() + errmsg := "" + if errInter != nil { + errmsg = errInter.(error).Error() + } + sess.sendRpcResp(req, replyv.Interface(), errmsg) +} + +// A value sent as a placeholder for the server's response value when the server +// receives an invalid request. It is never decoded by the client since the Response +// contains an error when it is used. 
+var invalidRequest = struct{}{} + +func (s *Session) sendRpcResp(req *protocol.RpcRequest, reply interface{}, errmsg string) { + resp := &protocol.RpcResponse{ + ServiceMethod: req.ServiceMethod, + Seq: req.Seq, + } + // Encode the response header + resp.ServiceMethod = req.ServiceMethod + if errmsg != "" { + resp.Error = errmsg + reply = invalidRequest + } + + var err error + resp.Reply, err = Gob.Marshal(reply) + if err != nil { + resp.Error = err.Error() + } + s.Send(int(protocol.CoreBuiltinPacketID_PACKET_SS_RPC_RESP), resp, true) +} + +func (s *Session) onRpcReq(req *protocol.RpcRequest) { + service, mtype, argv, replyv, err := readRequest(req) + if err != nil { + if debugRPCLog && err != io.EOF { + logger.Logger.Debug("rpc:", err) + } + // send a response if we actually managed to read a header. + if req != nil { + s.sendRpcResp(req, invalidRequest, err.Error()) + return + } + } + service.call(s, mtype, req, argv, replyv) +} + +func readRequest(req *protocol.RpcRequest) (service *service, mtype *methodType, argv, replyv reflect.Value, err error) { + service, mtype, err = parseRequest(req) + if err != nil { + return + } + + // Decode the argument value. + argIsValue := false // if true, need to indirect before calling. + if mtype.ArgType.Kind() == reflect.Ptr { + argv = reflect.New(mtype.ArgType.Elem()) + } else { + argv = reflect.New(mtype.ArgType) + argIsValue = true + } + // argv guaranteed to be a pointer now. 
+ if err = Gob.Unmarshal(req.Args, argv.Interface()); err != nil { + return + } + + if argIsValue { + argv = argv.Elem() + } + + replyv = reflect.New(mtype.ReplyType.Elem()) + + switch mtype.ReplyType.Elem().Kind() { + case reflect.Map: + replyv.Elem().Set(reflect.MakeMap(mtype.ReplyType.Elem())) + case reflect.Slice: + replyv.Elem().Set(reflect.MakeSlice(mtype.ReplyType.Elem(), 0, 0)) + } + return +} + +func parseRequest(req *protocol.RpcRequest) (svc *service, mtype *methodType, err error) { + dot := strings.LastIndex(req.ServiceMethod, ".") + if dot < 0 { + err = errors.New("rpc: service/method request ill-formed: " + req.ServiceMethod) + return + } + serviceName := req.ServiceMethod[:dot] + methodName := req.ServiceMethod[dot+1:] + + // Look up the request. + svci, ok := serviceMap.Load(serviceName) + if !ok { + err = errors.New("rpc: can't find service " + req.ServiceMethod) + return + } + svc = svci.(*service) + mtype = svc.method[methodName] + if mtype == nil { + err = errors.New("rpc: can't find method " + req.ServiceMethod) + } + return +} + +func init() { + RegisterHandler(int(protocol.CoreBuiltinPacketID_PACKET_SS_RPC_REQ), HandlerWrapper(func(s *Session, packetid int, data interface{}) error { + if req, ok := data.(*protocol.RpcRequest); ok { + s.onRpcReq(req) + } + return nil + })) + RegisterFactory(int(protocol.CoreBuiltinPacketID_PACKET_SS_RPC_REQ), PacketFactoryWrapper(func() interface{} { + return &protocol.RpcRequest{} + })) +} diff --git a/core/netlib/session.go b/core/netlib/session.go new file mode 100644 index 0000000..dc1ed0a --- /dev/null +++ b/core/netlib/session.go @@ -0,0 +1,311 @@ +// session +package netlib + +import ( + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "mongo.games.com/goserver/core/container" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" +) + +type SessionCloseListener interface { + onClose(s ISession) +} + +type SessionCutPacketListener interface { + onCutPacket(w io.Writer) error 
+} + +type ISession interface { + SetAttribute(key, value interface{}) bool + RemoveAttribute(key interface{}) + GetAttribute(key interface{}) interface{} + GetSessionConfig() *SessionConfig + LocalAddr() string + RemoteAddr() string + IsIdle() bool + Close() + Send(packetid int, data interface{}, sync ...bool) bool + SendEx(packetid int, logicNo uint32, data interface{}, sync bool) bool + FireConnectEvent() bool + FireDisconnectEvent() bool + FirePacketReceived(packetid int, logicNo uint32, packet interface{}) bool + FirePacketSent(packetid int, logicNo uint32, data []byte) bool + FireSessionIdle() bool +} +type packet struct { + packetid int + logicno uint32 + data interface{} + next *packet +} + +type Session struct { + Id int + GroupId int + Sid int64 + Auth bool + impl ISession + sendBuffer chan *packet + recvBuffer chan *action + sc *SessionConfig + attributes *container.SynchronizedMap + scl SessionCloseListener + scpl SessionCutPacketListener + createTime time.Time + lastSndTime time.Time + lastRcvTime time.Time + waitor *utils.Waitor + rcvbuf *RWBuffer + sndbuf *RWBuffer + closed int32 + sendedBytes int64 + recvedBytes int64 + sendedPack int64 + recvedPack int64 + quit bool + shutSend bool + shutRecv bool + isConned bool + PendingRcv bool + PendingSnd bool + //rpc + mutex sync.Mutex // protects following + seq uint64 + pending map[uint64]*Call + //rpc +} + +func (s *Session) init() { + s.sendBuffer = make(chan *packet, s.sc.MaxPend) + s.recvBuffer = make(chan *action, s.sc.MaxDone) + s.attributes = container.NewSynchronizedMap() + if s.sc.IsInnerLink { + s.pending = make(map[uint64]*Call) + } +} + +func (s *Session) SetAttribute(key, value interface{}) bool { + return s.attributes.Set(key, value) +} + +func (s *Session) RemoveAttribute(key interface{}) { + s.attributes.Delete(key) +} + +func (s *Session) GetAttribute(key interface{}) interface{} { + return s.attributes.Get(key) +} + +func (s *Session) GetSessionConfig() *SessionConfig { + return s.sc +} + 
+func (s *Session) LocalAddr() string { + if s.impl != nil { + return s.impl.LocalAddr() + } + return "" +} + +func (s *Session) RemoteAddr() string { + if s.impl != nil { + return s.impl.RemoteAddr() + } + return "" +} + +func (s *Session) IsConned() bool { + return s.isConned +} + +func (s *Session) IsIdle() bool { + return s.lastRcvTime.Add(s.sc.IdleTimeout).Before(time.Now()) +} + +func (s *Session) Close() { + if !atomic.CompareAndSwapInt32(&s.closed, 0, 1) { + return + } + if s.quit { + return + } + s.quit = true + + if s.sc.IsInnerLink { + // Terminate pending calls. + s.mutex.Lock() + for _, call := range s.pending { + call.Error = ErrShutdown + call.done() + } + s.mutex.Unlock() + } + + go s.reapRoutine() +} + +func (s *Session) Send(packetid int, data interface{}, sync ...bool) bool { + if s.quit || s.shutSend { + return false + } + p := AllocPacket() + p.packetid = packetid + p.logicno = 0 + p.data = data + if len(sync) > 0 && sync[0] { + select { + case s.sendBuffer <- p: + case <-time.After(s.sc.WriteTimeout): + logger.Logger.Warn(s.Id, " send buffer full(", len(s.sendBuffer), "),data be droped(asyn), IsInnerLink ", + s.sc.IsInnerLink) + logger.Logger.Warn("Send session(sync) config desc:", *s.sc) + if s.sc.IsInnerLink == false { + s.Close() + } + return false + } + } else { + select { + case s.sendBuffer <- p: + default: + logger.Logger.Warn(s.Id, " send buffer full(", len(s.sendBuffer), "),data be droped(sync), IsInnerLink ", + s.sc.IsInnerLink) + logger.Logger.Warn("Send session(async) config desc:", *s.sc) + if s.sc.IsInnerLink == false { + s.Close() + } + return false + } + } + + return true +} + +func (s *Session) SendEx(packetid int, logicNo uint32, data interface{}, sync bool) bool { + if s.quit || s.shutSend { + return false + } + p := AllocPacket() + p.packetid = packetid + p.logicno = logicNo + p.data = data + if sync { + select { + case s.sendBuffer <- p: + case <-time.After(time.Duration(s.sc.WriteTimeout)): + logger.Logger.Warn(s.Id, " 
send buffer full(", len(s.sendBuffer), "),data be droped(asyn), IsInnerLink ", + s.sc.IsInnerLink) + logger.Logger.Warn("Send session(sync) config desc:", *s.sc) + if s.sc.IsInnerLink == false { + s.Close() + } + return false + } + } else { + select { + case s.sendBuffer <- p: + default: + logger.Logger.Warn(s.Id, " send buffer full(", len(s.sendBuffer), "),data be droped(sync), IsInnerLink ", + s.sc.IsInnerLink) + logger.Logger.Warn("Send session(async) config desc:", *s.sc) + if s.sc.IsInnerLink == false { + s.Close() + } + return false + } + } + + return true +} + +func (s *Session) FireConnectEvent() bool { + s.isConned = true + if s.sc.sfc != nil { + if !s.sc.sfc.OnSessionOpened(s) { + return false + } + } + if s.sc.shc != nil { + s.sc.shc.OnSessionOpened(s) + } + return true +} + +func (s *Session) FireDisconnectEvent() bool { + s.isConned = false + if s.sc.sfc != nil { + if !s.sc.sfc.OnSessionClosed(s) { + return false + } + } + if s.sc.shc != nil { + s.sc.shc.OnSessionClosed(s) + } + return true +} + +func (s *Session) FirePacketReceived(packetid int, logicNo uint32, packet interface{}) bool { + if s.sc.sfc != nil { + if !s.sc.sfc.OnPacketReceived(s, packetid, logicNo, packet) { + return false + } + } + if s.sc.shc != nil { + s.sc.shc.OnPacketReceived(s, packetid, logicNo, packet) + } + return true +} + +func (s *Session) FirePacketSent(packetid int, logicNo uint32, data []byte) bool { + if s.sc.sfc != nil { + if !s.sc.sfc.OnPacketSent(s, packetid, logicNo, data) { + return false + } + } + if s.sc.shc != nil { + s.sc.shc.OnPacketSent(s, packetid, logicNo, data) + } + return true +} + +func (s *Session) FireSessionIdle() bool { + if s.sc.sfc != nil { + if !s.sc.sfc.OnSessionIdle(s) { + return false + } + } + if s.sc.shc != nil { + s.sc.shc.OnSessionIdle(s) + } + return true +} + +func (s *Session) reapRoutine() { + defer func() { + if err := recover(); err != nil { + logger.Logger.Warn(s.Id, " reapRoutine panic : ", err) + } + }() + if !s.shutSend { + 
//close send goroutiue(throw a poison) + s.sendBuffer <- SendRoutinePoison + } + /* + if !s.shutRecv { + //close recv goroutiue + s.shutRead() + } + */ + s.waitor.Wait(fmt.Sprintf("Session.reapRoutine(%v_%v)", s.sc.Name, s.Id)) + s.scl.onClose(s) +} + +func (s *Session) destroy() { + s.FireDisconnectEvent() +} diff --git a/core/netlib/sessionfilter.go b/core/netlib/sessionfilter.go new file mode 100644 index 0000000..9b721eb --- /dev/null +++ b/core/netlib/sessionfilter.go @@ -0,0 +1,168 @@ +package netlib + +import ( + "container/list" +) + +var ( + sessionFilterCreatorPool = make(map[string]SessionFilterCreator) +) + +const ( + InterestOps_Opened uint = iota + InterestOps_Closed + InterestOps_Idle + InterestOps_Received + InterestOps_Sent + InterestOps_Max +) + +type SessionFilterCreator func() SessionFilter + +type SessionFilter interface { + GetName() string + GetInterestOps() uint + OnSessionOpened(s *Session) bool //run in main goroutine + OnSessionClosed(s *Session) bool //run in main goroutine + OnSessionIdle(s *Session) bool //run in main goroutine + OnPacketReceived(s *Session, packetid int, logicNo uint32, packet interface{}) bool //run in session receive goroutine + OnPacketSent(s *Session, packetid int, logicNo uint32, data []byte) bool //run in session send goroutine +} + +type BasicSessionFilter struct { +} + +func (bsf *BasicSessionFilter) GetName() string { return "BasicSessionFilter" } +func (bsf *BasicSessionFilter) GetInterestOps() uint { return 0 } +func (bsf *BasicSessionFilter) OnSessionOpened(s *Session) bool { return true } +func (bsf *BasicSessionFilter) OnSessionClosed(s *Session) bool { return true } +func (bsf *BasicSessionFilter) OnSessionIdle(s *Session) bool { return true } +func (bsf *BasicSessionFilter) OnPacketReceived(s *Session, packetid int, logicNo uint32, packet interface{}) bool { + return true +} +func (bsf *BasicSessionFilter) OnPacketSent(s *Session, packetid int, logicNo uint32, data []byte) bool { + return true +} + 
+type SessionFilterChain struct { + filters *list.List + filtersInterestOps [InterestOps_Max]*list.List +} + +func NewSessionFilterChain() *SessionFilterChain { + sfc := &SessionFilterChain{ + filters: list.New(), + } + for i := uint(0); i < InterestOps_Max; i++ { + sfc.filtersInterestOps[i] = list.New() + } + return sfc +} + +func (sfc *SessionFilterChain) AddFirst(sf SessionFilter) { + sfc.filters.PushFront(sf) + ops := sf.GetInterestOps() + for i := uint(0); i < InterestOps_Max; i++ { + if ops&(1< 0 { + for i = 0; i < a.sc.MaxDone; i++ { + select { + case data, ok := <-v.recvBuffer: + if !ok { + goto NEXT + } + data.do() + //nowork = false + doneCnt++ + default: + goto NEXT + } + } + } + NEXT: + //关闭idle + // if nowork && v.IsConned() && v.IsIdle() { + // v.FireSessionIdle() + // } + } + + if doneCnt > a.maxDone { + a.maxDone = doneCnt + } + if len(a.mapSessions) > a.maxActive { + a.maxActive = len(a.mapSessions) + } +} + +func (a *TcpAcceptor) dump() { + logger.Logger.Info("=========accept dump maxSessions=", a.maxActive, " maxDone=", a.maxDone) + for sid, s := range a.mapSessions { + logger.Logger.Info("=========session:", sid, " recvBuffer size=", len(s.recvBuffer), " sendBuffer size=", len(s.sendBuffer)) + } +} + +func (a *TcpAcceptor) stats() ServiceStats { + tNow := time.Now() + stats := ServiceStats{ + Id: a.sc.Id, + Type: a.sc.Type, + Name: a.sc.Name, + Addr: a.listener.Addr().String(), + MaxActive: a.maxActive, + MaxDone: a.maxDone, + RunningTime: int64(tNow.Sub(a.createTime) / time.Second), + } + + stats.SessionStats = make([]SessionStats, 0, len(a.mapSessions)) + for _, s := range a.mapSessions { + ss := SessionStats{ + Id: s.Id, + GroupId: s.GroupId, + SendedBytes: atomic.LoadInt64(&s.sendedBytes), + RecvedBytes: atomic.LoadInt64(&s.recvedBytes), + SendedPack: atomic.LoadInt64(&s.sendedPack), + RecvedPack: atomic.LoadInt64(&s.recvedPack), + PendSendPack: len(s.sendBuffer), + PendRecvPack: len(s.recvBuffer), + RemoteAddr: s.RemoteAddr(), + 
RunningTime: int64(tNow.Sub(s.createTime) / time.Second), + } + stats.SessionStats = append(stats.SessionStats, ss) + } + return stats +} + +func (a *TcpAcceptor) procChanEvent() { + for i := 0; i < a.sc.MaxDone; i++ { + select { + case s := <-a.acptChan: + a.procAccepted(s) + case s := <-a.reaper: + if tcps, ok := s.(*Session); ok { + a.procReap(tcps) + } + default: + return + } + } +} + +func (a *TcpAcceptor) GetSessionConfig() *SessionConfig { + return a.sc +} + +func (a *TcpAcceptor) Addr() net.Addr { + if a.listener != nil { + return a.listener.Addr() + } + return nil +} diff --git a/core/netlib/tcp_connector.go b/core/netlib/tcp_connector.go new file mode 100644 index 0000000..c5d9024 --- /dev/null +++ b/core/netlib/tcp_connector.go @@ -0,0 +1,246 @@ +// connector +package netlib + +import ( + "fmt" + "net" + "strconv" + "time" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" + "sync/atomic" +) + +type TcpConnector struct { + sc *SessionConfig + e *NetEngine + s *TcpSession + idGen utils.IdGen + connChan chan net.Conn + reaper chan ISession + waitor *utils.Waitor + createTime time.Time + quit bool + reaped bool + maxActive int + maxDone int +} + +func newTcpConnector(e *NetEngine, sc *SessionConfig) *TcpConnector { + c := &TcpConnector{ + sc: sc, + e: e, + s: nil, + connChan: make(chan net.Conn, 2), + reaper: make(chan ISession, 1), + waitor: utils.NewWaitor("netlib.TcpConnector"), + createTime: time.Now(), + } + + ConnectorMgr.registeConnector(c) + return c +} + +func (c *TcpConnector) connectRoutine() { + name := fmt.Sprintf("TcpConnector.connectRoutine(%v_%v)", c.sc.Name, c.sc.Id) + c.waitor.Add(name, 1) + defer c.waitor.Done(name) + + service := c.sc.Ip + ":" + strconv.Itoa(int(c.sc.Port)) + conn, err := net.Dial("tcp", service) + if err == nil { + c.connChan <- conn + return + } + for { + select { + case <-time.After(ReconnectInterval): + if c.quit { + return + } + conn, err := net.Dial("tcp", service) + if err == nil { 
+ if c.quit { + conn.Close() + return + } + c.connChan <- conn + return + } + } + } +} + +func (c *TcpConnector) start() error { + + go c.connectRoutine() + return nil +} + +func (c *TcpConnector) update() { + c.procActive() + c.procChanEvent() +} + +func (c *TcpConnector) shutdown() { + + if c.quit { + return + } + c.quit = true + + if c.s != nil { + c.s.Close() + } else { + go c.reapRoutine() + } +} + +func (c *TcpConnector) procActive() { + var i int + var doneCnt int + if c.s != nil && c.s.canShutdown() { + return + } else if c.s != nil && c.s.IsConned() { + if len(c.s.recvBuffer) > 0 { + for i = 0; i < c.sc.MaxDone; i++ { + select { + case data, ok := <-c.s.recvBuffer: + if !ok { + goto NEXT + } + data.do() + doneCnt++ + default: + goto NEXT + } + } + } + } +NEXT: + if doneCnt > c.maxDone { + c.maxDone = doneCnt + } +} + +func (c *TcpConnector) dump() { + logger.Logger.Info("=========connector dump maxDone=", c.maxDone) + logger.Logger.Info("=========session recvBuffer size=", len(c.s.recvBuffer), " sendBuffer size=", len(c.s.sendBuffer)) +} + +func (c *TcpConnector) procChanEvent() { + for { + select { + case conn := <-c.connChan: + c.procConnected(conn) + case s := <-c.reaper: + if tcps, ok := s.(*Session); ok { + c.procReap(tcps) + } + + default: + return + } + } +} + +func (c *TcpConnector) onClose(s ISession) { + c.reaper <- s +} + +func (c *TcpConnector) procConnected(conn net.Conn) { + if tcpconn, ok := conn.(*net.TCPConn); ok { + tcpconn.SetLinger(c.sc.SoLinger) + tcpconn.SetNoDelay(c.sc.NoDelay) + tcpconn.SetReadBuffer(c.sc.RcvBuff) + tcpconn.SetWriteBuffer(c.sc.SndBuff) + tcpconn.SetKeepAlive(c.sc.KeepAlive) + if c.sc.KeepAlive { + tcpconn.SetKeepAlivePeriod(c.sc.KeepAlivePeriod) + // err := tcpkeepalive.SetKeepAlive(conn, c.sc.KeepAliveIdle, c.sc.KeepAliveCount, c.sc.KeepAlivePeriod) + // if err != nil { + // logger.Logger.Warnf("(a *TcpConnector) procConnected SetKeepAlive err:%v", err) + // } + } + } + + c.s = newTcpSession(c.idGen.NextId(), conn, 
c.sc, c) + c.s.FireConnectEvent() + c.s.start() +} + +func (c *TcpConnector) procReap(s *Session) { + for len(s.recvBuffer) > 0 { + data, ok := <-s.recvBuffer + if !ok { + break + } + data.do() + } + + s.destroy() + + if (c.sc.IsAutoReconn == false && c.s.Id == s.Id) || c.quit { + c.s = nil + go c.reapRoutine() + } else if c.sc.IsAutoReconn && c.s.Id == s.Id { + c.s = nil + if !c.quit { + go c.connectRoutine() + } + } +} + +func (c *TcpConnector) reapRoutine() { + if c.reaped { + return + } + + c.reaped = true + + c.waitor.Wait(fmt.Sprintf("TcpConnector.reapRoutine_%v", c.sc.Id)) + select { + case conn := <-c.connChan: + conn.Close() + default: + } + c.e.childAck <- c.sc.Id + ConnectorMgr.unregisteConnector(c) +} + +func (c *TcpConnector) GetSessionConfig() *SessionConfig { + return c.sc +} + +func (c *TcpConnector) stats() ServiceStats { + tNow := time.Now() + stats := ServiceStats{ + Id: c.sc.Id, + Type: c.sc.Type, + Name: c.sc.Name, + MaxActive: 1, + MaxDone: c.maxDone, + RunningTime: int64(tNow.Sub(c.createTime) / time.Second), + } + + if c.s != nil { + stats.Addr = c.s.LocalAddr() + stats.SessionStats = []SessionStats{ + { + Id: c.s.Id, + GroupId: c.s.GroupId, + SendedBytes: atomic.LoadInt64(&c.s.sendedBytes), + RecvedBytes: atomic.LoadInt64(&c.s.recvedBytes), + SendedPack: atomic.LoadInt64(&c.s.sendedPack), + RecvedPack: atomic.LoadInt64(&c.s.recvedPack), + PendSendPack: len(c.s.sendBuffer), + PendRecvPack: len(c.s.recvBuffer), + RemoteAddr: c.s.RemoteAddr(), + RunningTime: int64(tNow.Sub(c.s.createTime) / time.Second), + }, + } + + } + return stats +} diff --git a/core/netlib/tcp_session.go b/core/netlib/tcp_session.go new file mode 100644 index 0000000..cf07cb1 --- /dev/null +++ b/core/netlib/tcp_session.go @@ -0,0 +1,227 @@ +// session +package netlib + +import ( + "fmt" + "net" + "runtime" + "strconv" + "time" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" +) + +var ( + SendRoutinePoison *packet = nil +) + +type 
TcpSession struct { + Session + conn net.Conn +} + +func newTcpSession(id int, conn net.Conn, sc *SessionConfig, scl SessionCloseListener) *TcpSession { + s := &TcpSession{ + conn: conn, + } + s.Session.Id = id + s.Session.sc = sc + s.Session.scl = scl + s.Session.createTime = time.Now() + s.Session.waitor = utils.NewWaitor("netlib.TcpSession") + s.Session.impl = s + s.init() + + return s +} + +func (s *TcpSession) init() { + s.Session.init() +} + +func (s *TcpSession) LocalAddr() string { + return s.conn.LocalAddr().String() +} + +func (s *TcpSession) RemoteAddr() string { + return s.conn.RemoteAddr().String() +} + +func (s *TcpSession) start() { + s.lastRcvTime = time.Now() + go s.recvRoutine() + go s.sendRoutine() +} + +func (s *TcpSession) sendRoutine() { + name := fmt.Sprintf("TcpSession.sendRoutine(%v_%v)", s.sc.Name, s.Id) + s.waitor.Add(name, 1) + defer func() { + if err := recover(); err != nil { + if !s.sc.IsClient && s.sc.IsInnerLink { + logger.Logger.Warn(s.Id, " ->close: TcpSession.sendRoutine err: ", err) + } else { + logger.Logger.Trace(s.Id, " ->close: TcpSession.sendRoutine err: ", err) + } + } + s.sc.encoder.FinishEncode(&s.Session) + s.shutWrite() + s.shutRead() + s.Close() + s.waitor.Done(name) + }() + + var ( + err error + data []byte + ) + + for !s.quit || len(s.sendBuffer) != 0 { + if s.PendingSnd { + runtime.Gosched() + continue + } + select { + case packet, ok := <-s.sendBuffer: + if !ok { + panic("[comm expt]sendBuffer chan closed") + } + + if packet == nil { + panic("[comm expt]normal close send") + } + + if s.sc.IsInnerLink { + var timeZero time.Time + s.conn.SetWriteDeadline(timeZero) + } else { + if s.sc.WriteTimeout != 0 { + s.conn.SetWriteDeadline(time.Now().Add(s.sc.WriteTimeout)) + } + } + + data, err = s.sc.encoder.Encode(&s.Session, packet.packetid, packet.logicno, packet.data, s.conn) + if err != nil { + logger.Logger.Trace("s.sc.encoder.Encode err", err) + if s.sc.IsInnerLink == false { + FreePacket(packet) + panic(err) + } + } 
+			s.FirePacketSent(packet.packetid, packet.logicno, data)
+			FreePacket(packet)
+			s.lastSndTime = time.Now()
+		}
+	}
+}
+
+func (s *TcpSession) recvRoutine() {
+	name := fmt.Sprintf("TcpSession.recvRoutine(%v_%v)", s.sc.Name, s.Id)
+	s.waitor.Add(name, 1)
+	defer func() {
+		if err := recover(); err != nil {
+			if !s.sc.IsClient && s.sc.IsInnerLink {
+				logger.Logger.Warn(s.Id, " ->close: TcpSession.recvRoutine err: ", err)
+			} else {
+				logger.Logger.Trace(s.Id, " ->close: TcpSession.recvRoutine err: ", err)
+			}
+		}
+		s.sc.decoder.FinishDecode(&s.Session)
+		s.shutRead()
+		s.Close()
+		s.waitor.Done(name)
+	}()
+
+	var (
+		err      error
+		pck      interface{}
+		packetid int
+		logicNo  uint32
+		raw      []byte
+	)
+
+	for {
+		if s.PendingRcv {
+			runtime.Gosched()
+			continue
+		}
+		if s.sc.IsInnerLink {
+			var timeZero time.Time
+			s.conn.SetReadDeadline(timeZero)
+		} else {
+			if s.sc.ReadTimeout != 0 {
+				s.conn.SetReadDeadline(time.Now().Add(s.sc.ReadTimeout))
+			}
+		}
+
+		packetid, logicNo, pck, err, raw = s.sc.decoder.Decode(&s.Session, s.conn)
+		if err != nil {
+			bUnproc := true
+			bPackErr := false
+			if _, ok := err.(*UnparsePacketTypeErr); ok {
+				bPackErr = true
+				if s.FirePacketReceived(packetid, logicNo, raw) {
+					if s.sc.eph != nil && s.sc.eph.OnUnknowPacket(&s.Session, packetid, logicNo, raw) {
+						bUnproc = false
+					}
+				}
+			}
+			if bUnproc {
+				logger.Logger.Tracef("s.sc.decoder.Decode(packetid:%v) err:%v ", packetid, err)
+				if s.sc.IsInnerLink == false {
+					panic(err)
+				} else if !bPackErr {
+					panic(err)
+				}
+			}
+		}
+		if pck != nil {
+			if s.FirePacketReceived(packetid, logicNo, pck) {
+				act := AllocAction()
+				act.s = &s.Session
+				act.p = pck
+				act.packid = packetid
+				act.logicNo = logicNo
+				act.n = "packet:" + strconv.Itoa(packetid)
+				s.recvBuffer <- act
+			}
+		}
+		s.lastRcvTime = time.Now()
+	}
+}
+
+func (s *TcpSession) shutRead() {
+	if s.shutRecv {
+		return
+	}
+	logger.Logger.Trace(s.Id, " shutRead")
+	s.shutRecv = true
+	if tcpconn, ok := s.conn.(*net.TCPConn); ok {
+		tcpconn.CloseRead()
+	}
+}
+
+func (s *TcpSession) shutWrite() {
+	if s.shutSend {
+		return
+	}
+	logger.Logger.Trace(s.Id, " shutWrite")
+	rest := len(s.sendBuffer)
+	for rest > 0 {
+		packet := <-s.sendBuffer
+		if packet != nil {
+			FreePacket(packet)
+		}
+		rest--
+	}
+
+	s.shutSend = true
+	if tcpconn, ok := s.conn.(*net.TCPConn); ok {
+		tcpconn.CloseWrite()
+	}
+}
+
+func (s *TcpSession) canShutdown() bool {
+	return s.shutRecv && s.shutSend
+}
diff --git a/core/netlib/tcpkeepalive/LICENSE b/core/netlib/tcpkeepalive/LICENSE
new file mode 100644
index 0000000..317ae5c
--- /dev/null
+++ b/core/netlib/tcpkeepalive/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Felix Geisendörfer (felix@debuggable.com) and contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
diff --git a/core/netlib/tcpkeepalive/README.md b/core/netlib/tcpkeepalive/README.md new file mode 100644 index 0000000..4ef1b9b --- /dev/null +++ b/core/netlib/tcpkeepalive/README.md @@ -0,0 +1,15 @@ +# tcpkeepalive + +**Known Issues:** Some problems with the implementation were [reported](https://groups.google.com/d/msg/golang-nuts/rRu6ibLNdeI/TIzShZCmbzwJ), I'll try to fix them when I get a chance, or if somebody sends a PR. + +Package tcpkeepalive implements additional TCP keepalive control beyond what is +currently offered by the net pkg. + +Only Linux \>= 2.4, DragonFly, FreeBSD, NetBSD and OS X \>= 10.8 are supported +at this point, but patches for additional platforms are welcome. + +See also: http://felixge.de/2014/08/26/tcp-keepalive-with-golang.html + +**License:** MIT + +**Docs:** http://godoc.org/github.com/felixge/tcpkeepalive diff --git a/core/netlib/tcpkeepalive/keepalive.go b/core/netlib/tcpkeepalive/keepalive.go new file mode 100644 index 0000000..40e7b62 --- /dev/null +++ b/core/netlib/tcpkeepalive/keepalive.go @@ -0,0 +1,100 @@ +// Package tcpkeepalive implements additional TCP keepalive control beyond what +// is currently offered by the net pkg. +// +// Only Linux >= 2.4, DragonFly, FreeBSD, NetBSD and OS X >= 10.8 are supported +// at this point, but patches for additional platforms are welcome. +// +// See also: http://felixge.de/2014/08/26/tcp-keepalive-with-golang.html +package tcpkeepalive + +import ( + "fmt" + "net" + "os" + + "time" +) + +// EnableKeepAlive enables TCP keepalive for the given conn, which must be a +// *tcp.TCPConn. The returned Conn allows overwriting the default keepalive +// parameters used by the operating system. 
+func EnableKeepAlive(conn net.Conn) (*Conn, error) { + tcp, ok := conn.(*net.TCPConn) + if !ok { + return nil, fmt.Errorf("Bad conn type: %T", conn) + } + if err := tcp.SetKeepAlive(true); err != nil { + return nil, err + } + file, err := tcp.File() + if err != nil { + return nil, err + } + fd := int(file.Fd()) + return &Conn{TCPConn: tcp, fd: fd}, nil +} + +// Conn adds additional TCP keepalive control to a *net.TCPConn. +type Conn struct { + *net.TCPConn + fd int +} + +// SetKeepAliveIdle sets the time (in seconds) the connection needs to remain +// idle before TCP starts sending keepalive probes. +func (c *Conn) SetKeepAliveIdle(d time.Duration) error { + return setIdle(c.fd, secs(d)) +} + +// SetKeepAliveCount sets the maximum number of keepalive probes TCP should +// send before dropping the connection. +func (c *Conn) SetKeepAliveCount(n int) error { + return setCount(c.fd, n) +} + +// SetKeepAliveInterval sets the time (in seconds) between individual keepalive +// probes. +func (c *Conn) SetKeepAliveInterval(d time.Duration) error { + return c.SetKeepAliveInterval(d) +} + +func secs(d time.Duration) int { + d += (time.Second - time.Nanosecond) + return int(d.Seconds()) +} + +// Enable TCP keepalive in non-blocking mode with given settings for +// the connection, which must be a *tcp.TCPConn. 
+// SetKeepAlive enables TCP keepalive on c (which must be a *net.TCPConn) and
+// applies the given idle time, probe count and probe interval.
+func SetKeepAlive(c net.Conn, idleTime time.Duration, count int, interval time.Duration) (err error) {
+
+	conn, ok := c.(*net.TCPConn)
+	if !ok {
+		return fmt.Errorf("Bad connection type: %T", c)
+	}
+
+	if err := conn.SetKeepAlive(true); err != nil {
+		return err
+	}
+
+	if err := conn.SetKeepAlivePeriod(interval); err != nil {
+		return err
+	}
+
+	var f *os.File
+
+	// BUG fix: the original tested `err == nil` and returned early, so on
+	// success the count/idle options below were never applied, and on failure
+	// execution continued with f == nil (nil dereference).
+	if f, err = conn.File(); err != nil {
+		return err
+	}
+	// Close the dup'd descriptor when done; the options take effect on the
+	// shared underlying socket. The original leaked this file.
+	defer f.Close()
+
+	fd := int(f.Fd())
+
+	if err = setCount(fd, count); err != nil {
+		return err
+	}
+
+	if err = setIdle(fd, secs(idleTime)); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/core/netlib/tcpkeepalive/keepalive_bsd.go b/core/netlib/tcpkeepalive/keepalive_bsd.go
new file mode 100644
index 0000000..71e157f
--- /dev/null
+++ b/core/netlib/tcpkeepalive/keepalive_bsd.go
@@ -0,0 +1,20 @@
+// +build dragonfly freebsd netbsd
+
+package tcpkeepalive
+
+import (
+	"os"
+	"syscall"
+)
+
+func setIdle(fd int, secs int) error {
+	return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs))
+}
+
+func setCount(fd int, n int) error {
+	return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, n))
+}
+
+func setInterval(fd int, secs int) error {
+	return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs))
+}
diff --git a/core/netlib/tcpkeepalive/keepalive_darwin.go b/core/netlib/tcpkeepalive/keepalive_darwin.go
new file mode 100644
index 0000000..9df4879
--- /dev/null
+++ b/core/netlib/tcpkeepalive/keepalive_darwin.go
@@ -0,0 +1,24 @@
+package tcpkeepalive
+
+import (
+	"os"
+	"syscall"
+)
+
+// from netinet/tcp.h (OS X 10.9.4)
+const (
+	_TCP_KEEPINTVL = 0x101 /* interval between keepalives */
+	_TCP_KEEPCNT   = 0x102 /* number of keepalives before close */
+)
+
+func setIdle(fd int, secs int) error {
+	return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP,
syscall.TCP_KEEPALIVE, secs)) +} + +func setCount(fd int, n int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, _TCP_KEEPCNT, n)) +} + +func setInterval(fd int, secs int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, _TCP_KEEPINTVL, secs)) +} diff --git a/core/netlib/tcpkeepalive/keepalive_linux.go b/core/netlib/tcpkeepalive/keepalive_linux.go new file mode 100644 index 0000000..7e4f27a --- /dev/null +++ b/core/netlib/tcpkeepalive/keepalive_linux.go @@ -0,0 +1,18 @@ +package tcpkeepalive + +import ( + "os" + "syscall" +) + +func setIdle(fd int, secs int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs)) +} + +func setCount(fd int, n int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, n)) +} + +func setInterval(fd int, secs int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs)) +} diff --git a/core/netlib/tcpkeepalive/keepalive_solaris.go b/core/netlib/tcpkeepalive/keepalive_solaris.go new file mode 100644 index 0000000..7e4f27a --- /dev/null +++ b/core/netlib/tcpkeepalive/keepalive_solaris.go @@ -0,0 +1,18 @@ +package tcpkeepalive + +import ( + "os" + "syscall" +) + +func setIdle(fd int, secs int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs)) +} + +func setCount(fd int, n int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, n)) +} + +func setInterval(fd int, secs int) error { + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs)) +} diff --git a/core/netlib/tcpkeepalive/keepalive_windows.go 
b/core/netlib/tcpkeepalive/keepalive_windows.go new file mode 100644 index 0000000..487ab62 --- /dev/null +++ b/core/netlib/tcpkeepalive/keepalive_windows.go @@ -0,0 +1,13 @@ +package tcpkeepalive + +func setIdle(fd int, secs int) error { + return nil +} + +func setCount(fd int, n int) error { + return nil +} + +func setInterval(fd int, secs int) error { + return nil +} diff --git a/core/netlib/test.go b/core/netlib/test.go new file mode 100644 index 0000000..dc01731 --- /dev/null +++ b/core/netlib/test.go @@ -0,0 +1,21 @@ +package netlib + +import "testing" + +func TestAllocAction(t *testing.T) { + AllocAction() +} + +func TestFreeAction(t *testing.T) { + a := AllocAction() + FreeAction(a) +} + +func BenchmarkAllocAction(b *testing.B) { + tt := make([]*action, 0, b.N) + b.StartTimer() + for i := 0; i < b.N; i++ { + tt = append(tt, AllocAction()) + } + b.StopTimer() +} diff --git a/core/netlib/udp_acceptor.go b/core/netlib/udp_acceptor.go new file mode 100644 index 0000000..607aa8c --- /dev/null +++ b/core/netlib/udp_acceptor.go @@ -0,0 +1,292 @@ +// acceptor +package netlib + +import ( + "fmt" + "github.com/xtaci/kcp-go" + "net" + "strconv" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" + "sync/atomic" + "time" +) + +type UdpAcceptor struct { + e *NetEngine + sc *SessionConfig + listener *kcp.Listener + idGen utils.IdGen + mapSessions map[int]*UdpSession + acptChan chan *UdpSession + connChan chan *kcp.UDPSession + reaper chan ISession + waitor *utils.Waitor + createTime time.Time + quit bool + reaped bool + restart bool + maxActive int + maxDone int +} + +func newUdpAcceptor(e *NetEngine, sc *SessionConfig) *UdpAcceptor { + a := &UdpAcceptor{ + e: e, + sc: sc, + } + + a.init() + + return a +} + +func (a *UdpAcceptor) init() { + backlog := int(a.sc.MaxConn + a.sc.ExtraConn) + a.connChan = make(chan *kcp.UDPSession, backlog) + a.acptChan = make(chan *UdpSession, backlog) + a.reaper = make(chan ISession, backlog) + a.mapSessions = 
make(map[int]*UdpSession) + a.waitor = utils.NewWaitor("netlib.UdpAcceptor") + a.createTime = time.Now() + a.quit = false +} + +func (a *UdpAcceptor) start() (err error) { + service := a.sc.Ip + ":" + strconv.Itoa(int(a.sc.Port)) + a.listener, err = kcp.ListenWithOptions(service, nil, 0, 0) + if err != nil { + logger.Logger.Error(err) + return err + } + logger.Logger.Info(a.sc.Name, " listen at ", a.listener.Addr().String()) + + go a.acceptRoutine() + go a.sessionRoutine() + + return nil +} + +func (a *UdpAcceptor) update() { + + a.procActive() + + a.procChanEvent() +} + +func (a *UdpAcceptor) shutdown() { + + if a.quit { + return + } + + a.quit = true + + if a.listener != nil { + a.listener.Close() + a.listener = nil + } + + if a.connChan != nil { + close(a.connChan) + a.connChan = nil + } + + if len(a.mapSessions) == 0 { + go a.reapRoutine() + } else { + for _, v := range a.mapSessions { + v.Close() + } + } +} + +func (a *UdpAcceptor) acceptRoutine() { + name := fmt.Sprintf("UdpAcceptor.acceptRoutine(%v_%v)", a.sc.Name, a.sc.Id) + a.waitor.Add(name, 1) + defer a.waitor.Done(name) + + for !a.quit { + conn, err := a.listener.AcceptKCP() + if err != nil { + logger.Logger.Warn(err) + if err.Error() == "timeout" { + continue + } + break + } + a.connChan <- conn + } + + //异常退出,需要重新启动 + if !a.quit { + a.shutdown() + a.restart = true + } +} + +func (a *UdpAcceptor) sessionRoutine() { + name := fmt.Sprintf("UdpAcceptor.sessionRoutine(%v_%v)", a.sc.Name, a.sc.Id) + a.waitor.Add(name, 1) + defer a.waitor.Done(name) + + for !a.quit { + select { + case conn, ok := <-a.connChan: + if !ok { //quiting(chan had closed) + return + } + s := newUdpSession(a.idGen.NextId(), conn, a.sc, a) + if s != nil { + s.conn.SetWindowSize(a.sc.MaxPend, a.sc.MaxPend) + if a.sc.NoDelay { + s.conn.SetNoDelay(1, 10, 2, 1) + } else { + s.conn.SetNoDelay(0, 40, 0, 0) + } + if a.sc.MTU > 128 && a.sc.MTU <= 1500 { //粗略的估算ip(最长60)+udp(8)+kcp(24)+proto(12) + s.conn.SetMtu(a.sc.MTU) + } + a.acptChan <- s + 
} + } + } +} + +func (a *UdpAcceptor) onClose(s ISession) { + a.reaper <- s +} + +func (a *UdpAcceptor) procReap(s *Session) { + if _, exist := a.mapSessions[s.Id]; exist { + delete(a.mapSessions, s.Id) + s.destroy() + } + + if a.quit { + if len(a.mapSessions) == 0 { + go a.reapRoutine() + } + } +} + +func (a *UdpAcceptor) reapRoutine() { + if a.reaped { + return + } + a.reaped = true + a.waitor.Wait(fmt.Sprintf("UdpAcceptor.reapRoutine_%v", a.sc.Id)) + + a.e.childAck <- a.sc.Id + if a.restart { //延迟1s后,重新启动 + time.Sleep(time.Second) + a.e.backlogSc <- a.sc + } +} + +func (a *UdpAcceptor) procAccepted(s *UdpSession) { + a.mapSessions[s.Id] = s + s.FireConnectEvent() + s.start() +} + +func (a *UdpAcceptor) procActive() { + var i int + //var nowork bool + var doneCnt int + for _, v := range a.mapSessions { + //nowork = true + if v.IsConned() && len(v.recvBuffer) > 0 { + for i = 0; i < a.sc.MaxDone; i++ { + select { + case data, ok := <-v.recvBuffer: + if !ok { + goto NEXT + } + data.do() + //nowork = false + doneCnt++ + default: + goto NEXT + } + } + } + NEXT: + //关闭idle + // if nowork && v.IsConned() && v.IsIdle() { + // v.FireSessionIdle() + // } + } + + if doneCnt > a.maxDone { + a.maxDone = doneCnt + } + if len(a.mapSessions) > a.maxActive { + a.maxActive = len(a.mapSessions) + } +} + +func (a *UdpAcceptor) dump() { + logger.Logger.Info("=========accept dump maxSessions=", a.maxActive, " maxDone=", a.maxDone) + for sid, s := range a.mapSessions { + logger.Logger.Info("=========session:", sid, " recvBuffer size=", len(s.recvBuffer), " sendBuffer size=", len(s.sendBuffer)) + } +} + +func (a *UdpAcceptor) stats() ServiceStats { + tNow := time.Now() + stats := ServiceStats{ + Id: a.sc.Id, + Type: a.sc.Type, + Name: a.sc.Name, + Addr: a.listener.Addr().String(), + MaxActive: a.maxActive, + MaxDone: a.maxDone, + RunningTime: int64(tNow.Sub(a.createTime) / time.Second), + } + + stats.SessionStats = make([]SessionStats, 0, len(a.mapSessions)) + for _, s := range 
a.mapSessions { + ss := SessionStats{ + Id: s.Id, + GroupId: s.GroupId, + SendedBytes: atomic.LoadInt64(&s.sendedBytes), + RecvedBytes: atomic.LoadInt64(&s.recvedBytes), + SendedPack: atomic.LoadInt64(&s.sendedPack), + RecvedPack: atomic.LoadInt64(&s.recvedPack), + PendSendPack: len(s.sendBuffer), + PendRecvPack: len(s.recvBuffer), + RemoteAddr: s.RemoteAddr(), + RunningTime: int64(tNow.Sub(s.createTime) / time.Second), + } + stats.SessionStats = append(stats.SessionStats, ss) + } + return stats +} + +func (a *UdpAcceptor) procChanEvent() { + for i := 0; i < a.sc.MaxDone; i++ { + select { + case s := <-a.acptChan: + a.procAccepted(s) + case s := <-a.reaper: + if tcps, ok := s.(*Session); ok { + a.procReap(tcps) + } + default: + return + } + } +} + +func (a *UdpAcceptor) GetSessionConfig() *SessionConfig { + return a.sc +} + +func (a *UdpAcceptor) Addr() net.Addr { + if a.listener != nil { + return a.listener.Addr() + } + return nil +} diff --git a/core/netlib/udp_connector.go b/core/netlib/udp_connector.go new file mode 100644 index 0000000..79c54eb --- /dev/null +++ b/core/netlib/udp_connector.go @@ -0,0 +1,242 @@ +// connector +package netlib + +import ( + "fmt" + "github.com/xtaci/kcp-go" + "strconv" + "time" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" + "sync/atomic" +) + +type UdpConnector struct { + sc *SessionConfig + e *NetEngine + s *UdpSession + idGen utils.IdGen + connChan chan *kcp.UDPSession + reaper chan ISession + waitor *utils.Waitor + createTime time.Time + quit bool + reaped bool + maxActive int + maxDone int +} + +func newUdpConnector(e *NetEngine, sc *SessionConfig) *UdpConnector { + c := &UdpConnector{ + sc: sc, + e: e, + s: nil, + connChan: make(chan *kcp.UDPSession, 2), + reaper: make(chan ISession, 1), + waitor: utils.NewWaitor("netlib.UdpConnector"), + createTime: time.Now(), + } + + ConnectorMgr.registeConnector(c) + return c +} + +func (c *UdpConnector) connectRoutine() { + name := 
fmt.Sprintf("UdpConnector.connectRoutine(%v_%v)", c.sc.Name, c.sc.Id) + c.waitor.Add(name, 1) + defer c.waitor.Done(name) + + service := c.sc.Ip + ":" + strconv.Itoa(int(c.sc.Port)) + conn, err := kcp.DialWithOptions(service, nil, 0, 0) + if err == nil { + c.connChan <- conn + return + } + for { + select { + case <-time.After(ReconnectInterval): + if c.quit { + return + } + conn, err := kcp.DialWithOptions(service, nil, 0, 0) + if err == nil { + if c.quit { + conn.Close() + return + } + c.connChan <- conn + return + } + } + } +} + +func (c *UdpConnector) start() error { + + go c.connectRoutine() + return nil +} + +func (c *UdpConnector) update() { + c.procActive() + c.procChanEvent() +} + +func (c *UdpConnector) shutdown() { + + if c.quit { + return + } + c.quit = true + + if c.s != nil { + c.s.Close() + } else { + go c.reapRoutine() + } +} + +func (c *UdpConnector) procActive() { + var i int + var doneCnt int + if c.s != nil && c.s.canShutdown() { + return + } else if c.s != nil && c.s.IsConned() { + if len(c.s.recvBuffer) > 0 { + for i = 0; i < c.sc.MaxDone; i++ { + select { + case data, ok := <-c.s.recvBuffer: + if !ok { + goto NEXT + } + data.do() + doneCnt++ + default: + goto NEXT + } + } + } + } +NEXT: + if doneCnt > c.maxDone { + c.maxDone = doneCnt + } +} + +func (c *UdpConnector) dump() { + logger.Logger.Info("=========connector dump maxDone=", c.maxDone) + logger.Logger.Info("=========session recvBuffer size=", len(c.s.recvBuffer), " sendBuffer size=", len(c.s.sendBuffer)) +} + +func (c *UdpConnector) procChanEvent() { + for { + select { + case conn := <-c.connChan: + c.procConnected(conn) + case s := <-c.reaper: + if tcps, ok := s.(*Session); ok { + c.procReap(tcps) + } + + default: + return + } + } +} + +func (c *UdpConnector) onClose(s ISession) { + c.reaper <- s +} + +func (c *UdpConnector) procConnected(conn *kcp.UDPSession) { + c.s = newUdpSession(c.idGen.NextId(), conn, c.sc, c) + if c.s != nil { + c.s.conn.SetWindowSize(c.sc.MaxPend, c.sc.MaxPend) 
+ if c.sc.NoDelay { + c.s.conn.SetNoDelay(1, 10, 2, 1) + } else { + c.s.conn.SetNoDelay(0, 40, 0, 0) + } + if c.sc.MTU > 128 && c.sc.MTU <= 1500 { //粗略的估算ip(最长60)+udp(8)+kcp(24)+proto(12) + c.s.conn.SetMtu(c.sc.MTU) + } + c.s.FireConnectEvent() + c.s.start() + } +} + +func (c *UdpConnector) procReap(s *Session) { + for len(s.recvBuffer) > 0 { + data, ok := <-s.recvBuffer + if !ok { + break + } + data.do() + } + + s.destroy() + + if (c.sc.IsAutoReconn == false && c.s.Id == s.Id) || c.quit { + c.s = nil + go c.reapRoutine() + } else if c.sc.IsAutoReconn && c.s.Id == s.Id { + c.s = nil + if !c.quit { + go c.connectRoutine() + } + } +} + +func (c *UdpConnector) reapRoutine() { + if c.reaped { + return + } + + c.reaped = true + + c.waitor.Wait(fmt.Sprintf("UdpConnector.reapRoutine_%v", c.sc.Id)) + select { + case conn := <-c.connChan: + conn.Close() + default: + } + c.e.childAck <- c.sc.Id + ConnectorMgr.unregisteConnector(c) +} + +func (c *UdpConnector) GetSessionConfig() *SessionConfig { + return c.sc +} + +func (c *UdpConnector) stats() ServiceStats { + tNow := time.Now() + stats := ServiceStats{ + Id: c.sc.Id, + Type: c.sc.Type, + Name: c.sc.Name, + MaxActive: 1, + MaxDone: c.maxDone, + RunningTime: int64(tNow.Sub(c.createTime) / time.Second), + } + + if c.s != nil { + stats.Addr = c.s.LocalAddr() + stats.SessionStats = []SessionStats{ + { + Id: c.s.Id, + GroupId: c.s.GroupId, + SendedBytes: atomic.LoadInt64(&c.s.sendedBytes), + RecvedBytes: atomic.LoadInt64(&c.s.recvedBytes), + SendedPack: atomic.LoadInt64(&c.s.sendedPack), + RecvedPack: atomic.LoadInt64(&c.s.recvedPack), + PendSendPack: len(c.s.sendBuffer), + PendRecvPack: len(c.s.recvBuffer), + RemoteAddr: c.s.RemoteAddr(), + RunningTime: int64(tNow.Sub(c.s.createTime) / time.Second), + }, + } + + } + return stats +} diff --git a/core/netlib/udp_session.go b/core/netlib/udp_session.go new file mode 100644 index 0000000..3badb73 --- /dev/null +++ b/core/netlib/udp_session.go @@ -0,0 +1,233 @@ +// session +package 
netlib + +import ( + "bytes" + "fmt" + "github.com/xtaci/kcp-go" + "runtime" + "strconv" + "time" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" +) + +type UdpSession struct { + Session + conn *kcp.UDPSession +} + +func newUdpSession(id int, conn *kcp.UDPSession, sc *SessionConfig, scl SessionCloseListener) *UdpSession { + s := &UdpSession{ + conn: conn, + } + s.Session.Id = id + s.Session.sc = sc + s.Session.scl = scl + s.Session.createTime = time.Now() + s.Session.waitor = utils.NewWaitor("netlib.UdpSession") + s.Session.impl = s + s.init() + + return s +} + +func (s *UdpSession) init() { + s.Session.init() +} + +func (s *UdpSession) LocalAddr() string { + return s.conn.LocalAddr().String() +} + +func (s *UdpSession) RemoteAddr() string { + return s.conn.RemoteAddr().String() +} + +func (s *UdpSession) start() { + s.lastRcvTime = time.Now() + go s.recvRoutine() + go s.sendRoutine() +} + +func (s *UdpSession) sendRoutine() { + name := fmt.Sprintf("UdpSession.sendRoutine(%v_%v)", s.sc.Name, s.Id) + s.waitor.Add(name, 1) + defer func() { + if err := recover(); err != nil { + if !s.sc.IsClient && s.sc.IsInnerLink { + logger.Logger.Warn(s.Id, " ->close: UdpSession.sendRoutine err: ", err) + } else { + logger.Logger.Trace(s.Id, " ->close: UdpSession.sendRoutine err: ", err) + } + } + s.sc.encoder.FinishEncode(&s.Session) + s.shutWrite() + s.shutRead() + s.Close() + s.waitor.Done(name) + }() + + var ( + err error + data []byte + ) + + for !s.quit || len(s.sendBuffer) != 0 { + if s.PendingSnd { + runtime.Gosched() + continue + } + select { + case packet, ok := <-s.sendBuffer: + if !ok { + panic("[comm expt]sendBuffer chan closed") + } + + if packet == nil { + panic("[comm expt]normal close send") + } + + if s.sc.IsInnerLink { + var timeZero time.Time + s.conn.SetWriteDeadline(timeZero) + } else { + if s.sc.WriteTimeout != 0 { + s.conn.SetWriteDeadline(time.Now().Add(s.sc.WriteTimeout)) + } + } + + data, err = 
s.sc.encoder.Encode(&s.Session, packet.packetid, packet.logicno, packet.data, s.conn) + if err != nil { + logger.Logger.Trace("s.sc.encoder.Encode err", err) + if s.sc.IsInnerLink == false { + FreePacket(packet) + panic(err) + } + } + FreePacket(packet) + s.FirePacketSent(packet.packetid, packet.logicno, data) + s.lastSndTime = time.Now() + } + } +} + +func (s *UdpSession) recvRoutine() { + name := fmt.Sprintf("UdpSession.recvRoutine(%v_%v)", s.sc.Name, s.Id) + s.waitor.Add(name, 1) + defer func() { + if err := recover(); err != nil { + if !s.sc.IsClient && s.sc.IsInnerLink { + logger.Logger.Warn(s.Id, " ->close: UdpSession.recvRoutine err: ", err) + } else { + logger.Logger.Trace(s.Id, " ->close: UdpSession.recvRoutine err: ", err) + } + } + s.sc.decoder.FinishDecode(&s.Session) + s.shutRead() + s.Close() + s.waitor.Done(name) + }() + + var ( + err error + pck interface{} + packetid int + logicNo uint32 + raw []byte + n int + ) + + buf := make([]byte, s.sc.MaxPacket) + for { + if s.PendingRcv { + runtime.Gosched() + continue + } + if s.sc.IsInnerLink { + var timeZero time.Time + s.conn.SetReadDeadline(timeZero) + } else { + if s.sc.ReadTimeout != 0 { + s.conn.SetReadDeadline(time.Now().Add(s.sc.ReadTimeout)) + } + } + + n, err = s.conn.Read(buf) + if err != nil { + panic(err) + } + + packetid, logicNo, pck, err, raw = s.sc.decoder.Decode(&s.Session, bytes.NewBuffer(buf[:n])) + if err != nil { + bUnproc := true + bPackErr := false + if _, ok := err.(*UnparsePacketTypeErr); ok { + bPackErr = true + if s.FirePacketReceived(packetid, logicNo, raw) { + if s.sc.eph != nil && s.sc.eph.OnUnknowPacket(&s.Session, packetid, logicNo, raw) { + bUnproc = false + } + } + } + if bUnproc { + logger.Logger.Tracef("s.sc.decoder.Decode(packetid:%v) err:%v ", packetid, err) + if s.sc.IsInnerLink == false { + panic(err) + } else if !bPackErr { + panic(err) + } + } + } + if pck != nil { + if s.FirePacketReceived(packetid, logicNo, pck) { + act := AllocAction() + act.s = &s.Session + 
act.p = pck + act.packid = packetid + act.logicNo = logicNo + act.n = "packet:" + strconv.Itoa(packetid) + s.recvBuffer <- act + } + } + s.lastRcvTime = time.Now() + } +} + +func (s *UdpSession) shutRead() { + if s.shutRecv { + return + } + logger.Logger.Trace(s.Id, " shutRead") + s.shutRecv = true + if s.conn != nil { + s.conn.Close() + s.conn = nil + } +} + +func (s *UdpSession) shutWrite() { + if s.shutSend { + return + } + logger.Logger.Trace(s.Id, " shutWrite") + rest := len(s.sendBuffer) + for rest > 0 { + packet := <-s.sendBuffer + if packet != nil { + FreePacket(packet) + } + rest-- + } + + s.shutSend = true + if s.conn != nil { + s.conn.Close() + s.conn = nil + } +} + +func (s *UdpSession) canShutdown() bool { + return s.shutRecv && s.shutSend +} diff --git a/core/netlib/unknowpackethandler.go b/core/netlib/unknowpackethandler.go new file mode 100644 index 0000000..dabea13 --- /dev/null +++ b/core/netlib/unknowpackethandler.go @@ -0,0 +1,35 @@ +package netlib + +var ( + unknowPacketHandlerCreatorPool = make(map[string]UnknowPacketHandlerCreator) +) + +type UnknowPacketHandlerCreator func() UnknowPacketHandler + +type UnknowPacketHandler interface { + OnUnknowPacket(s *Session, packetid int, logicNo uint32, data []byte) bool //run in session receive goroutine +} + +type UnknowPacketHandlerWrapper func(session *Session, packetid int, logicNo uint32, data []byte) bool + +func (hw UnknowPacketHandlerWrapper) OnUnknowPacket(session *Session, packetid int, logicNo uint32, data []byte) bool { + return hw(session, packetid, logicNo, data) +} + +func RegisteUnknowPacketHandlerCreator(name string, ephc UnknowPacketHandlerCreator) { + if ephc == nil { + return + } + if _, exist := unknowPacketHandlerCreatorPool[name]; exist { + panic("repeate registe ErrorPacketHandler:" + name) + } + + unknowPacketHandlerCreatorPool[name] = ephc +} + +func GetUnknowPacketHandlerCreator(name string) UnknowPacketHandlerCreator { + if ephc, exist := 
unknowPacketHandlerCreatorPool[name]; exist { + return ephc + } + return nil +} diff --git a/core/netlib/ws_acceptor.go b/core/netlib/ws_acceptor.go new file mode 100644 index 0000000..9540f9d --- /dev/null +++ b/core/netlib/ws_acceptor.go @@ -0,0 +1,292 @@ +package netlib + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "strconv" + "time" + + "github.com/gorilla/websocket" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" + "sync/atomic" +) + +const ( + // Time allowed to write a message to the client. + writeWait = 10 * time.Second + + // Time allowed to read the next message from the client. + readWait = 60 * time.Second + + // Send pings to client with this period. Must be less than readWait. + pingPeriod = (readWait * 9) / 10 +) + +type WsAcceptor struct { + e *NetEngine + sc *SessionConfig + idGen utils.IdGen + mapSessions map[int]*WsSession + reaper chan ISession + acptChan chan *WsSession + waitor *utils.Waitor + upgrader websocket.Upgrader + createTime time.Time + quit bool + reaped bool + maxActive int + maxDone int +} + +func newWsAcceptor(e *NetEngine, sc *SessionConfig) *WsAcceptor { + a := &WsAcceptor{ + e: e, + sc: sc, + quit: false, + mapSessions: make(map[int]*WsSession), + waitor: utils.NewWaitor("netlib.WsAcceptor"), + upgrader: websocket.Upgrader{ + ReadBufferSize: sc.RcvBuff, + WriteBufferSize: sc.SndBuff, + CheckOrigin: func(r *http.Request) bool { return true }, + }, + createTime: time.Now(), + } + + a.init() + + return a +} + +func (a *WsAcceptor) init() { + + temp := int(a.sc.MaxConn + a.sc.ExtraConn) + a.reaper = make(chan ISession, temp) + a.acptChan = make(chan *WsSession, temp) +} + +func (a *WsAcceptor) start() (err error) { + addr := a.sc.Ip + ":" + strconv.Itoa(int(a.sc.Port)) + + ln, err := net.Listen("tcp", addr) + if err != nil { + logger.Logger.Error(err) + return nil + } + + h := http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + defer 
utils.DumpStackIfPanic("ws.HandlerFunc") + if req.Method != "GET" { + http.Error(res, "method not allowed", 405) + return + } + ws, err := a.upgrader.Upgrade(res, req, nil) + if _, ok := err.(websocket.HandshakeError); ok { + http.Error(res, "Not a websocket handshake", 400) + return + } else if err != nil { + http.Error(res, fmt.Sprintf("%v", err), 500) + logger.Logger.Error(err) + return + } + ws.SetPongHandler(func(string) error { + ws.SetReadDeadline(time.Now().Add(readWait)) + return nil + }) + s := newWsSession(a.idGen.NextId(), ws, a.sc, a) + a.acptChan <- s + }) + + if a.sc.Protocol == "wss" { + config := &tls.Config{} + config.NextProtos = []string{"http/1.1"} + + config.Certificates = make([]tls.Certificate, 1) + config.Certificates[0], err = tls.LoadX509KeyPair(a.sc.CertFile, a.sc.KeyFile) + if err != nil { + logger.Logger.Error(err) + return nil + } + + ln = tls.NewListener(ln, config) + } + + httpServer := &http.Server{ + Addr: addr, + Handler: h, + ReadTimeout: a.sc.ReadTimeout, + WriteTimeout: a.sc.WriteTimeout, + } + go httpServer.Serve(ln) + return nil +} + +func (a *WsAcceptor) update() { + a.procActive() + a.procChanEvent() +} + +func (a *WsAcceptor) shutdown() { + + if a.quit { + return + } + + a.quit = true + + if len(a.mapSessions) == 0 { + go a.reapRoutine() + } else { + for _, v := range a.mapSessions { + v.Close() + } + } +} + +func (a *WsAcceptor) onClose(s ISession) { + a.reaper <- s +} + +func (a *WsAcceptor) procReap(s *Session) { + if _, exist := a.mapSessions[s.Id]; exist { + delete(a.mapSessions, s.Id) + s.destroy() + } + + if a.quit { + if len(a.mapSessions) == 0 { + go a.reapRoutine() + } + } +} + +func (a *WsAcceptor) reapRoutine() { + if a.reaped { + return + } + a.reaped = true + a.waitor.Wait(fmt.Sprintf("WsAcceptor.reapRoutine_%v", a.sc.Id)) + + a.e.childAck <- a.sc.Id +} + +func (a *WsAcceptor) procAccepted(s *WsSession) { + a.mapSessions[s.Id] = s + s.FireConnectEvent() + s.start() +} + +func (a *WsAcceptor) procActive() { + 
var i int + //var nowork bool + var doneCnt int + for _, v := range a.mapSessions { + //nowork = true + if v.IsConned() && len(v.recvBuffer) > 0 { + for i = 0; i < a.sc.MaxDone; i++ { + select { + case data, ok := <-v.recvBuffer: + if !ok { + goto NEXT + } + data.do() + //nowork = false + doneCnt++ + default: + goto NEXT + } + } + } + NEXT: + //关闭idle + // if nowork && v.IsConned() && v.IsIdle() { + // v.FireSessionIdle() + // } + } + + if doneCnt > a.maxDone { + a.maxDone = doneCnt + } + if len(a.mapSessions) > a.maxActive { + a.maxActive = len(a.mapSessions) + } +} + +func (a *WsAcceptor) dump() { + logger.Logger.Info("=========wsaccept dump maxSessions=", a.maxActive, " maxDone=", a.maxDone) + for sid, s := range a.mapSessions { + logger.Logger.Info("=========wssession:", sid, " recvBuffer size=", len(s.recvBuffer), " sendBuffer size=", len(s.sendBuffer)) + } +} + +func (a *WsAcceptor) procChanEvent() { + for i := 0; i < a.sc.MaxDone; i++ { + select { + case s := <-a.acptChan: + a.procAccepted(s) + case s := <-a.reaper: + if ss, ok := s.(*Session); ok { + a.procReap(ss) + } + + default: + return + } + } +} + +func (a *WsAcceptor) GetSessionConfig() *SessionConfig { + return a.sc +} + +type WsAddr struct { + acceptor *WsAcceptor +} + +// name of the network +func (a *WsAddr) Network() string { + return "WS" +} + +// string form of address +func (a *WsAddr) String() string { + return fmt.Sprintf("%v:%v", a.acceptor.sc.Ip, a.acceptor.sc.Port) +} + +func (a *WsAcceptor) Addr() net.Addr { + return &WsAddr{acceptor: a} +} + +func (a *WsAcceptor) stats() ServiceStats { + tNow := time.Now() + stats := ServiceStats{ + Id: a.sc.Id, + Type: a.sc.Type, + Name: a.sc.Name, + Addr: a.Addr().String(), + MaxActive: a.maxActive, + MaxDone: a.maxDone, + RunningTime: int64(tNow.Sub(a.createTime) / time.Second), + } + + stats.SessionStats = make([]SessionStats, 0, len(a.mapSessions)) + for _, s := range a.mapSessions { + ss := SessionStats{ + Id: s.Id, + GroupId: s.GroupId, + 
SendedBytes: atomic.LoadInt64(&s.sendedBytes), + RecvedBytes: atomic.LoadInt64(&s.recvedBytes), + SendedPack: atomic.LoadInt64(&s.sendedPack), + RecvedPack: atomic.LoadInt64(&s.recvedPack), + PendSendPack: len(s.sendBuffer), + PendRecvPack: len(s.recvBuffer), + RemoteAddr: s.RemoteAddr(), + RunningTime: int64(tNow.Sub(a.createTime) / time.Second), + } + stats.SessionStats = append(stats.SessionStats, ss) + } + return stats +} diff --git a/core/netlib/ws_connector.go b/core/netlib/ws_connector.go new file mode 100644 index 0000000..f00d2dd --- /dev/null +++ b/core/netlib/ws_connector.go @@ -0,0 +1,231 @@ +// connector +package netlib + +import ( + "fmt" + "strconv" + "time" + + "github.com/gorilla/websocket" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" + "sync/atomic" +) + +type WsConnector struct { + sc *SessionConfig + e *NetEngine + s *WsSession + idGen utils.IdGen + connChan chan *websocket.Conn + reaper chan ISession + waitor *utils.Waitor + dialer websocket.Dialer + createTime time.Time + quit bool + reaped bool + maxActive int + maxDone int +} + +func newWsConnector(e *NetEngine, sc *SessionConfig) *WsConnector { + c := &WsConnector{ + sc: sc, + e: e, + s: nil, + connChan: make(chan *websocket.Conn, 2), + reaper: make(chan ISession, 1), + waitor: utils.NewWaitor("netlib.WsConnector"), + dialer: websocket.Dialer{ + ReadBufferSize: sc.RcvBuff, + WriteBufferSize: sc.SndBuff, + }, + createTime: time.Now(), + } + + ConnectorMgr.registeConnector(c) + return c +} + +func (c *WsConnector) connectRoutine() { + name := fmt.Sprintf("WsConnector.connectRoutine(%v_%v)", c.sc.Name, c.sc.Id) + c.waitor.Add(name, 1) + defer c.waitor.Done(name) + //ws + service := c.sc.Protocol + "://" + c.sc.Ip + ":" + strconv.Itoa(int(c.sc.Port)) + c.sc.Path + conn, _, err := c.dialer.Dial(service, nil) + if err == nil { + c.connChan <- conn + return + } + for { + select { + case <-time.After(ReconnectInterval): + if c.quit { + return + } + conn, _, err 
:= c.dialer.Dial(service, nil) + if err == nil { + if c.quit { + conn.Close() + return + } + c.connChan <- conn + return + } + } + } +} + +func (c *WsConnector) start() error { + go c.connectRoutine() + return nil +} + +func (c *WsConnector) update() { + c.procActive() + c.procChanEvent() +} + +func (c *WsConnector) shutdown() { + if c.quit { + return + } + c.quit = true + + if c.s != nil { + c.s.Close() + } else { + go c.reapRoutine() + } +} + +func (c *WsConnector) procActive() { + var i int + var doneCnt int + if c.s == nil { + return + } else if c.s != nil && c.s.IsConned() { + if len(c.s.recvBuffer) > 0 { + for i = 0; i < c.sc.MaxDone; i++ { + select { + case data, ok := <-c.s.recvBuffer: + if !ok { + goto NEXT + } + data.do() + doneCnt++ + default: + goto NEXT + } + } + } + } +NEXT: + if doneCnt > c.maxDone { + c.maxDone = doneCnt + } +} + +func (c *WsConnector) dump() { + logger.Logger.Info("=========wsconnector dump maxDone=", c.maxDone) + logger.Logger.Info("=========wssession recvBuffer size=", len(c.s.recvBuffer), " sendBuffer size=", len(c.s.sendBuffer)) +} + +func (c *WsConnector) procChanEvent() { + for { + select { + case conn := <-c.connChan: + c.procConnected(conn) + case s := <-c.reaper: + if wss, ok := s.(*Session); ok { + c.procReap(wss) + } + + default: + return + } + } +} + +func (c *WsConnector) onClose(s ISession) { + c.reaper <- s +} + +func (c *WsConnector) procConnected(conn *websocket.Conn) { + c.s = newWsSession(c.idGen.NextId(), conn, c.sc, c) + c.s.FireConnectEvent() + c.s.start() +} + +func (c *WsConnector) procReap(s *Session) { + for len(s.recvBuffer) > 0 { + data, ok := <-s.recvBuffer + if !ok { + break + } + data.do() + } + + s.destroy() + + if (c.sc.IsAutoReconn == false && c.s.Id == s.Id) || c.quit { + c.s = nil + go c.reapRoutine() + } else if c.sc.IsAutoReconn && c.s.Id == s.Id { + c.s = nil + go c.connectRoutine() + } +} + +func (c *WsConnector) reapRoutine() { + if c.reaped { + return + } + + c.reaped = true + + 
c.waitor.Wait(fmt.Sprintf("WsConnector.reapRoutine_%v", c.sc.Id)) + select { + case conn := <-c.connChan: + conn.Close() + default: + } + c.e.childAck <- c.sc.Id + ConnectorMgr.unregisteConnector(c) +} + +func (c *WsConnector) GetSessionConfig() *SessionConfig { + return c.sc +} + +func (c *WsConnector) stats() ServiceStats { + tNow := time.Now() + stats := ServiceStats{ + Id: c.sc.Id, + Type: c.sc.Type, + Name: c.sc.Name, + MaxActive: 1, + MaxDone: c.maxDone, + Addr: c.sc.Protocol + "://" + c.sc.Ip + ":" + strconv.Itoa(int(c.sc.Port)) + c.sc.Path, + RunningTime: int64(tNow.Sub(c.createTime) / time.Second), + } + + if c.s != nil { + stats.SessionStats = []SessionStats{ + { + Id: c.s.Id, + GroupId: c.s.GroupId, + SendedBytes: atomic.LoadInt64(&c.s.sendedBytes), + RecvedBytes: atomic.LoadInt64(&c.s.recvedBytes), + SendedPack: atomic.LoadInt64(&c.s.sendedPack), + RecvedPack: atomic.LoadInt64(&c.s.recvedPack), + PendSendPack: len(c.s.sendBuffer), + PendRecvPack: len(c.s.recvBuffer), + RemoteAddr: c.s.RemoteAddr(), + RunningTime: int64(tNow.Sub(c.s.createTime) / time.Second), + }, + } + } + return stats +} diff --git a/core/netlib/ws_session.go b/core/netlib/ws_session.go new file mode 100644 index 0000000..116f882 --- /dev/null +++ b/core/netlib/ws_session.go @@ -0,0 +1,266 @@ +package netlib + +import ( + "bytes" + "io" + "runtime" + "strconv" + "time" + + "fmt" + + "github.com/gorilla/websocket" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" +) + +type WsSession struct { + Session + conn *websocket.Conn +} + +func newWsSession(id int, conn *websocket.Conn, sc *SessionConfig, scl SessionCloseListener) *WsSession { + s := &WsSession{ + conn: conn, + } + s.Session.Id = id + s.Session.sc = sc + s.Session.scl = scl + s.Session.scpl = s + s.Session.createTime = time.Now() + s.Session.waitor = utils.NewWaitor("netlib.WsSession") + s.Session.impl = s + s.init() + + return s +} + +func (s *WsSession) init() { + s.Session.init() +} + +func (s 
*WsSession) LocalAddr() string { + return s.conn.LocalAddr().String() +} + +func (s *WsSession) RemoteAddr() string { + return s.conn.RemoteAddr().String() +} + +func (s *WsSession) start() { + s.lastRcvTime = time.Now() + go s.sendRoutine() + go s.recvRoutine() +} + +func (s *WsSession) sendRoutine() { + name := fmt.Sprintf("WsSession.sendRoutine(%v_%v)", s.sc.Name, s.Id) + s.waitor.Add(name, 1) + ticker := time.NewTicker(pingPeriod) + defer func() { + if err := recover(); err != nil { + logger.Logger.Trace(s.Id, " ->close: Session.procSend err: ", err) + } + ticker.Stop() + s.sc.encoder.FinishEncode(&s.Session) + s.shutWrite() + s.Close() + s.waitor.Done(name) + }() + + b := make([]byte, s.sc.SndBuff) + buf := bytes.NewBuffer(b) + + var ( + err error + data []byte + ) + + for !s.quit || len(s.sendBuffer) != 0 { + if s.PendingSnd { + runtime.Gosched() + continue + } + select { + case packet, ok := <-s.sendBuffer: + if !ok { + s.write(websocket.CloseMessage, []byte{}) + panic("[comm expt]sendBuffer chan closed") + } + if packet == nil { + panic("[comm expt]normal close send") + } + buf.Reset() + data, err = s.sc.encoder.Encode(&s.Session, packet.packetid, packet.logicno, packet.data, buf) + if err != nil { + logger.Logger.Trace("s.sc.encoder.Encode err", err) + if s.sc.IsInnerLink == false { + FreePacket(packet) + panic(err) + } + } + FreePacket(packet) + if buf.Len() != 0 { + if s.sc.IsInnerLink { + var timeZero time.Time + s.conn.SetWriteDeadline(timeZero) + } else { + if s.sc.WriteTimeout != 0 { + s.conn.SetWriteDeadline(time.Now().Add(s.sc.WriteTimeout)) + } + } + + if err = s.write(websocket.BinaryMessage, buf.Bytes()); err != nil { + panic(err) + } + s.FirePacketSent(packet.packetid, packet.logicno, data) + s.lastSndTime = time.Now() + } + + case <-ticker.C: + if s.sc.AuthKey != "" && s.Auth { + if err := s.write(websocket.PingMessage, []byte{}); err != nil { + return + } + } + } + } +} + +func (s *WsSession) recvRoutine() { + name := 
fmt.Sprintf("WsSession.recvRoutine(%v_%v)", s.sc.Name, s.Id) + s.waitor.Add(name, 1) + defer func() { + if err := recover(); err != nil { + logger.Logger.Trace(s.Id, " ->close: Session.procRecv err: ", err) + } + s.sc.decoder.FinishDecode(&s.Session) + s.shutRead() + s.Close() + s.waitor.Done(name) + }() + + s.conn.SetReadLimit(int64(s.sc.MaxPacket)) + var ( + pck interface{} + packetid int + logicNo uint32 + raw []byte + ) + + for { + if s.PendingRcv { + runtime.Gosched() + continue + } + if s.sc.IsInnerLink { + var timeZero time.Time + s.conn.SetReadDeadline(timeZero) + } else { + if s.sc.ReadTimeout != 0 { + s.conn.SetReadDeadline(time.Now().Add(s.sc.ReadTimeout)) + } + } + op, r, err := s.conn.NextReader() + if err != nil { + logger.Logger.Info("s.conn.NextReader err:", err) + panic(err) + } + switch op { + case websocket.BinaryMessage: + packetid, logicNo, pck, err, raw = s.sc.decoder.Decode(&s.Session, r) + if err != nil { + bUnproc := true + bPackErr := false + if _, ok := err.(*UnparsePacketTypeErr); ok { + bPackErr = true + if s.FirePacketReceived(packetid, logicNo, raw) { + if s.sc.eph != nil && s.sc.eph.OnUnknowPacket(&s.Session, packetid, logicNo, raw) { + bUnproc = false + } + } + } + if bUnproc { + logger.Logger.Warnf("s.sc.decoder.Decode(packetid:%v) err:%v ", packetid, err) + if s.sc.IsInnerLink == false { + panic(err) + } else if !bPackErr { + panic(err) + } + } + } + if pck != nil { + if s.FirePacketReceived(packetid, logicNo, pck) { + act := AllocAction() + act.s = &s.Session + act.p = pck + act.packid = packetid + act.logicNo = logicNo + act.n = "packet:" + strconv.Itoa(packetid) + s.recvBuffer <- act + } + } + s.lastRcvTime = time.Now() + case websocket.TextMessage: + logger.Logger.Warnf("s.sc.decoder.Decode receive TextMessage. 
but not support!!!") + case websocket.CloseMessage: + logger.Logger.Warnf("s.sc.decoder.Decode receive CloseMessage!") + panic(fmt.Errorf("reveive remote CloseMessage")) + case websocket.PingMessage: + logger.Logger.Warnf("s.sc.decoder.Decode receive PingMessage!") + s.write(websocket.PongMessage, []byte{}) + case websocket.PongMessage: + logger.Logger.Warnf("s.sc.decoder.Decode receive PongMessage!") + } + } +} + +// write writes a message with the given opCode and payload. +func (s *WsSession) write(opCode int, payload []byte) error { + if !s.sc.IsInnerLink && s.sc.WriteTimeout != 0 { + s.conn.SetWriteDeadline(time.Now().Add(s.sc.WriteTimeout)) + } + return s.conn.WriteMessage(opCode, payload) +} + +func (s *WsSession) shutRead() { + if s.shutRecv { + return + } + logger.Logger.Trace(s.Id, " shutRead") + s.shutRecv = true + if s.conn != nil { + s.conn.Close() + s.conn = nil + } +} + +func (s *WsSession) shutWrite() { + if s.shutSend { + return + } + logger.Logger.Trace(s.Id, " shutWrite") + rest := len(s.sendBuffer) + for rest > 0 { + packet := <-s.sendBuffer + if packet != nil { + FreePacket(packet) + } + rest-- + } + + s.shutSend = true + if s.conn != nil { + s.conn.Close() + s.conn = nil + } +} + +func (s *WsSession) onCutPacket(w io.Writer) (err error) { + if buf, ok := w.(*bytes.Buffer); ok { + err = s.write(websocket.BinaryMessage, buf.Bytes()) + buf.Reset() + return + } + return +} diff --git a/core/profile/config.go b/core/profile/config.go new file mode 100644 index 0000000..efc1252 --- /dev/null +++ b/core/profile/config.go @@ -0,0 +1,30 @@ +package profile + +import ( + "mongo.games.com/goserver/core" +) + +var Config = Configuration{} + +type Configuration struct { + SlowMS int +} + +func (c *Configuration) Name() string { + return "profile" +} + +func (c *Configuration) Init() error { + if c.SlowMS <= 0 { + c.SlowMS = 1000 + } + return nil +} + +func (c *Configuration) Close() error { + return nil +} + +func init() { + core.RegistePackage(&Config) +} 
diff --git a/core/profile/recycler_watcher.go b/core/profile/recycler_watcher.go new file mode 100644 index 0000000..d40cf88 --- /dev/null +++ b/core/profile/recycler_watcher.go @@ -0,0 +1,68 @@ +package profile + +import "sync" + +var wp = NewWatcherPool(1024) + +func AllocWatcher() *TimeWatcher { + return wp.Get() +} + +func FreeWatcher(t *TimeWatcher) { + wp.Give(t) +} + +type WatcherPool struct { + free *TimeWatcher + lock *sync.Mutex + num int + allocNum int + remainNum int +} + +func NewWatcherPool(num int) *WatcherPool { + wp := &WatcherPool{ + lock: new(sync.Mutex), + num: num, + } + return wp +} + +func (wp *WatcherPool) grow() { + var ( + i int + t *TimeWatcher + ts = make([]TimeWatcher, wp.num) + ) + wp.free = &(ts[0]) + t = wp.free + for i = 1; i < wp.num; i++ { + t.next = &(ts[i]) + t = t.next + } + t.next = nil + wp.allocNum += wp.num + wp.remainNum += wp.num + return +} + +func (wp *WatcherPool) Get() (t *TimeWatcher) { + wp.lock.Lock() + if t = wp.free; t == nil { + wp.grow() + t = wp.free + } + wp.free = t.next + t.next = nil + wp.remainNum-- + wp.lock.Unlock() + return +} + +func (wp *WatcherPool) Give(t *TimeWatcher) { + wp.lock.Lock() + t.next = wp.free + wp.free = t + wp.remainNum++ + wp.lock.Unlock() +} diff --git a/core/profile/statistics.go b/core/profile/statistics.go new file mode 100644 index 0000000..b3be399 --- /dev/null +++ b/core/profile/statistics.go @@ -0,0 +1,113 @@ +package profile + +import ( + "fmt" + "io" + "strings" + "sync" + "time" + + "mongo.games.com/goserver/core/basic" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/utils" +) + +const ( + TIME_ELEMENT_ACTION = iota + TIME_ELEMENT_TASK + TIME_ELEMENT_TIMER + TIME_ELEMENT_NET + TIME_ELEMENT_COMMAND + TIME_ELEMENT_MODULE + TIME_ELEMENT_JOB +) + +var TimeStatisticMgr = &timeStatisticMgr{ + elements: make(map[string]*TimeElement), +} + +type TimeElement struct { + Name string + ElementType int + Times int64 + TotalTick int64 + MaxTick int64 + MinTick 
int64 +} + +type timeStatisticMgr struct { + elements map[string]*TimeElement + l sync.RWMutex +} + +func (this *timeStatisticMgr) WatchStart(name string, elementype int) basic.IStatsWatch { + tw := newTimeWatcher(name, elementype) + return tw +} + +func (this *timeStatisticMgr) addStatistic(name string, elementype int, d int64) { + this.l.Lock() + te, exist := this.elements[name] + if !exist { + te = &TimeElement{ + Name: name, + ElementType: elementype, + Times: 1, + TotalTick: d, + MaxTick: d, + MinTick: d, + } + this.elements[name] = te + this.l.Unlock() + return + } + te.Times++ + te.TotalTick += d + if d < te.MinTick { + te.MinTick = d + } + if d > te.MaxTick { + te.MaxTick = d + if Config.SlowMS > 0 && d >= int64(Config.SlowMS)*int64(time.Millisecond) { + this.l.Unlock() + logger.Logger.Warnf("###slow timespan name: %s take:%s avg used:%s", strings.ToLower(te.Name), utils.ToS(time.Duration(d)), utils.ToS(time.Duration(te.TotalTick/te.Times))) + return + } + } + this.l.Unlock() +} + +func (this *timeStatisticMgr) GetStats() map[string]TimeElement { + elements := make(map[string]TimeElement) + this.l.RLock() + for k, v := range this.elements { + te := *v + te.TotalTick /= int64(time.Millisecond) + te.MinTick /= int64(time.Millisecond) + te.MaxTick /= int64(time.Millisecond) + elements[k] = te + } + this.l.RUnlock() + return elements +} + +func (this *timeStatisticMgr) dump(w io.Writer) { + elements := make(map[string]*TimeElement) + this.l.RLock() + for k, v := range this.elements { + elements[k] = v + } + this.l.RUnlock() + fmt.Fprintf(w, "| % -30s| % -10s | % -16s | % -16s | % -16s | % -16s |\n", "name", "times", "used", "max used", "min used", "avg used") + for k, v := range elements { + fmt.Fprintf(w, "| % -30s| % -10d | % -16s | % -16s | % -16s | % -16s |\n", strings.ToLower(k), v.Times, utils.ToS(time.Duration(v.TotalTick)), utils.ToS(time.Duration(v.MaxTick)), utils.ToS(time.Duration(v.MinTick)), utils.ToS(time.Duration(int64(v.TotalTick)/v.Times))) + } 
+} + +func GetStats() map[string]TimeElement { + return TimeStatisticMgr.GetStats() +} + +func init() { + basic.StatsWatchMgr = TimeStatisticMgr +} diff --git a/core/profile/timewatcher.go b/core/profile/timewatcher.go new file mode 100644 index 0000000..b62790a --- /dev/null +++ b/core/profile/timewatcher.go @@ -0,0 +1,24 @@ +package profile + +import "time" + +type TimeWatcher struct { + name string //模块名称 + elementype int //类型 + tStart time.Time //开始时间 + next *TimeWatcher +} + +func newTimeWatcher(name string, elementype int) *TimeWatcher { + w := AllocWatcher() + w.name = name + w.elementype = elementype + w.tStart = time.Now() + return w +} + +func (this *TimeWatcher) Stop() { + defer FreeWatcher(this) + d := time.Now().Sub(this.tStart) + TimeStatisticMgr.addStatistic(this.name, this.elementype, int64(d)) +} diff --git a/core/schedule/task.go b/core/schedule/task.go new file mode 100644 index 0000000..7748a62 --- /dev/null +++ b/core/schedule/task.go @@ -0,0 +1,661 @@ +package schedule + +import ( + "fmt" + "log" + "math" + "sort" + "strconv" + "strings" + "sync" + "time" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/profile" + "mongo.games.com/goserver/core/utils" + "sync/atomic" +) + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +type TaskStats struct { + RunTimes int64 + ErrTimes int64 + Prev time.Time + Next time.Time +} + +// The bounds for each field. 
+var ( + taskStats = new(sync.Map) + adminTaskList map[string]Tasker + lock sync.Mutex + stop chan bool + resume chan bool + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + days = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + weeks = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +type Schedule struct { + Second uint64 + Minute uint64 + Hour uint64 + Day uint64 + Month uint64 + Week uint64 +} + +type TaskFunc func() error + +type Tasker interface { + GetStatus() string + Run() error + SetNext(time.Time) + GetNext() time.Time + SetPrev(time.Time) + GetPrev() time.Time +} + +type taskerr struct { + t time.Time + errinfo string +} + +type Task struct { + Taskname string + Spec *Schedule + DoFunc TaskFunc + Prev time.Time + Next time.Time + Errlist []*taskerr //errtime:errinfo + ErrLimit int //max length for the errlist 0 stand for there' no limit +} + +func NewTask(tname string, spec string, f TaskFunc) *Task { + + task := &Task{ + Taskname: tname, + DoFunc: f, + ErrLimit: 100, + } + task.SetCron(spec) + return task +} + +func (tk *Task) GetStatus() string { + var str string + for _, v := range tk.Errlist { + str += v.t.String() + ":" + v.errinfo + "\n" + } + return str +} + +func (tk *Task) Run() error { + defer utils.DumpStackIfPanic("Task Run") + name := fmt.Sprintf("Task(%v)", tk.Taskname) + core.CoreObject().Waitor.Add(name, 1) + defer core.CoreObject().Waitor.Done(name) + + var stats *TaskStats + s, exist := taskStats.Load(tk.Taskname) + if s == nil || !exist { + stats = &TaskStats{ + Prev: tk.Prev, + Next: tk.Next, + RunTimes: 1, + } + taskStats.Store(tk.Taskname, stats) + } else { 
+ stats = s.(*TaskStats) + if stats != nil { + atomic.AddInt64(&stats.RunTimes, 1) + } + } + + //监控运行时间 + watch := profile.TimeStatisticMgr.WatchStart(fmt.Sprintf("/job/%v/run", tk.Taskname), profile.TIME_ELEMENT_JOB) + if watch != nil { + defer watch.Stop() + } + + err := tk.DoFunc() + if err != nil { + if stats != nil { + atomic.AddInt64(&stats.ErrTimes, 1) + } + if tk.ErrLimit > 0 && tk.ErrLimit > len(tk.Errlist) { + tk.Errlist = append(tk.Errlist, &taskerr{t: tk.Next, errinfo: err.Error()}) + } + } + return err +} + +func (tk *Task) SetNext(now time.Time) { + tk.Next = tk.Spec.Next(now) +} + +func (tk *Task) GetNext() time.Time { + return tk.Next +} +func (tk *Task) SetPrev(now time.Time) { + tk.Prev = now +} + +func (tk *Task) GetPrev() time.Time { + return tk.Prev +} + +//前6个字段分别表示: +// 秒钟:0-59 +// 分钟:0-59 +// 小时:1-23 +// 日期:1-31 +// 月份:1-12 +// 星期:0-6(0表示周日) + +// 还可以用一些特殊符号: +// +// *: 表示任何时刻 +// ,: 表示分割,如第三段里:2,4,表示2点和4点执行 +// +//    -:表示一个段,如第三端里: 1-5,就表示1到5点 +// +// /n : 表示每个n的单位执行一次,如第三段里,*/1, 就表示每隔1个小时执行一次命令。也可以写成1-23/1. 
+// +// /////////////////////////////////////////////////////// +// +// 0/30 * * * * * 每30秒 执行 +// 0 43 21 * * * 21:43 执行 +// 0 15 05 * * *    05:15 执行 +// 0 0 17 * * * 17:00 执行 +// 0 0 17 * * 1 每周一的 17:00 执行 +// 0 0,10 17 * * 0,2,3 每周日,周二,周三的 17:00和 17:10 执行 +// 0 0-10 17 1 * * 毎月1日从 17:00到7:10 毎隔1分钟 执行 +// 0 0 0 1,15 * 1 毎月1日和 15日和 一日的 0:00 执行 +// 0 42 4 1 * *     毎月1日的 4:42分 执行 +// 0 0 21 * * 1-6   周一到周六 21:00 执行 +// 0 0,10,20,30,40,50 * * * *  每隔10分 执行 +// 0 */10 * * * *        每隔10分 执行 +// 0 * 1 * * *         从1:0到1:59 每隔1分钟 执行 +// 0 0 1 * * *         1:00 执行 +// 0 0 */1 * * *        毎时0分 每隔1小时 执行 +// 0 0 * * * *         毎时0分 每隔1小时 执行 +// 0 2 8-20/3 * * *       8:02,11:02,14:02,17:02,20:02 执行 +// 0 30 5 1,15 * *       1日 和 15日的 5:30 执行 +func (t *Task) SetCron(spec string) { + t.Spec = t.parse(spec) +} + +func (t *Task) parse(spec string) *Schedule { + if len(spec) > 0 && spec[0] == '@' { + return t.parseSpec(spec) + } + // Split on whitespace. We require 5 or 6 fields. + // (second) (minute) (hour) (day of month) (month) (day of week, optional) + fields := strings.Fields(spec) + if len(fields) != 5 && len(fields) != 6 { + log.Panicf("Expected 5 or 6 fields, found %d: %s", len(fields), spec) + } + + // If a sixth field is not provided (DayOfWeek), then it is equivalent to star. 
+ if len(fields) == 5 { + fields = append(fields, "*") + } + + schedule := &Schedule{ + Second: getField(fields[0], seconds), + Minute: getField(fields[1], minutes), + Hour: getField(fields[2], hours), + Day: getField(fields[3], days), + Month: getField(fields[4], months), + Week: getField(fields[5], weeks), + } + + return schedule +} + +func (t *Task) parseSpec(spec string) *Schedule { + switch spec { + case "@yearly", "@annually": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: 1 << days.min, + Month: 1 << months.min, + Week: all(weeks), + } + + case "@monthly": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: 1 << days.min, + Month: all(months), + Week: all(weeks), + } + + case "@weekly": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: all(days), + Month: all(months), + Week: 1 << weeks.min, + } + + case "@daily", "@midnight": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: all(days), + Month: all(months), + Week: all(weeks), + } + + case "@hourly": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Day: all(days), + Month: all(months), + Week: all(weeks), + } + } + log.Panicf("Unrecognized descriptor: %s", spec) + return nil +} + +func (s *Schedule) Next(t time.Time) time.Time { + + // Start at the earliest possible time (the upcoming second). + t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. + yearLimit := t.Year() + 5 + +WRAP: + if t.Year() > yearLimit { + return time.Time{} + } + + // Find the first applicable month. + // If it's this month, then do nothing. 
+	// NOTE(review): the loop bodies below and the head of dayMatches were
+	// garbled in this patch (every "<uint(...)" span was stripped, apparently
+	// by HTML-tag removal). Reconstructed from the beego/toolbox cron
+	// implementation this scheduler derives from — verify against upstream.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 0, 1)
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Hour)
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are both satisfied by the given time.
+func dayMatches(s *Schedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Day > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Week > 0
+	)
+
+	if s.Day&starBit > 0 || s.Week&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
+
+func StartTask() {
+	go run()
+}
+
+func GetTask(name string) Tasker {
+	lock.Lock()
+	if task, exist := adminTaskList[name]; exist {
+		lock.Unlock()
+		return task
+	}
+	lock.Unlock()
+	return nil
+}
+
+func GetAllTask() map[string]Tasker {
+	ret := make(map[string]Tasker)
+	lock.Lock()
+	for name, task := range adminTaskList {
+		ret[name] = task
+	}
+	lock.Unlock()
+	return ret
+}
+
+func run() {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Println("!!!schedule run err:", err)
+		}
+	}()
+	now := time.Now().Local()
+	lock.Lock()
+	for _, t := range adminTaskList {
+		t.SetNext(now)
+	}
+	lock.Unlock()
+
+	for {
+		lock.Lock()
+		taskCnt := len(adminTaskList)
+		sortList := NewMapSorter(adminTaskList)
+		lock.Unlock()
+		sortList.Sort()
+		var effective time.Time
+		if taskCnt == 0 || sortList.Vals[0].GetNext().IsZero() {
+			// If there are no entries yet, just sleep - it still handles new entries
+			// and stop requests.
+			effective = now.AddDate(10, 0, 0)
+		} else {
+			effective = sortList.Vals[0].GetNext()
+		}
+
+		select {
+		case now = <-time.After(effective.Sub(now)):
+			// Run every entry whose next time was this effective time.
+ for _, e := range sortList.Vals { + if e.GetNext() != effective { + break + } + go e.Run() + e.SetPrev(e.GetNext()) + e.SetNext(effective) + } + continue + case <-resume: + now = time.Now().Local() + continue + case <-stop: + return + } + } +} + +func StopTask() { + stop <- true +} + +func AddTask(taskname string, t Tasker) { + lock.Lock() + adminTaskList[taskname] = t + t.SetNext(time.Now().Local()) + lock.Unlock() + select { + case resume <- true: + default: + } +} + +func DelTask(taskname string) { + lock.Lock() + delete(adminTaskList, taskname) + lock.Unlock() +} + +// sort map for tasker +type MapSorter struct { + Keys []string + Vals []Tasker +} + +func NewMapSorter(m map[string]Tasker) *MapSorter { + ms := &MapSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]Tasker, 0, len(m)), + } + for k, v := range m { + ms.Keys = append(ms.Keys, k) + ms.Vals = append(ms.Vals, v) + } + return ms +} + +func (ms *MapSorter) Sort() { + sort.Sort(ms) +} + +func (ms *MapSorter) Len() int { return len(ms.Keys) } +func (ms *MapSorter) Less(i, j int) bool { + if ms.Vals[i].GetNext().IsZero() { + return false + } + if ms.Vals[j].GetNext().IsZero() { + return true + } + return ms.Vals[i].GetNext().Before(ms.Vals[j].GetNext()) +} +func (ms *MapSorter) Swap(i, j int) { + ms.Vals[i], ms.Vals[j] = ms.Vals[j], ms.Vals[i] + ms.Keys[i], ms.Keys[j] = ms.Keys[j], ms.Keys[i] +} + +func getField(field string, r bounds) uint64 { + // list = range {"," range} + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bits |= getRange(expr, r) + } + return bits +} + +// getRange returns the bits indicated by the given expression: +// +// number | number "-" number [ "/" number ] +func getRange(expr string, r bounds) uint64 { + + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + ) + + var extra_star uint64 
+ if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra_star = starBit + } else { + start = parseIntOrName(lowAndHigh[0], r.names) + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end = parseIntOrName(lowAndHigh[1], r.names) + default: + log.Panicf("Too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step = mustParseInt(rangeAndStep[1]) + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + default: + log.Panicf("Too many slashes: %s", expr) + } + + if start < r.min { + log.Panicf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + log.Panicf("End of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + log.Panicf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + + return getBits(start, end, step) | extra_star +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) uint { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or panics. +func mustParseInt(expr string) uint { + num, err := strconv.Atoi(expr) + if err != nil { + log.Panicf("Failed to parse int from %s: %s", expr, err) + } + if num < 0 { + log.Panicf("Negative number (%d) not allowed: %s", num, expr) + } + + return uint(num) +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. 
(plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// task stats +func Stats() map[string]TaskStats { + stats := make(map[string]TaskStats) + taskStats.Range(func(k, v interface{}) bool { + if s, ok := v.(*TaskStats); ok { + stats[k.(string)] = *s + } + return true + }) + return stats +} + +func init() { + adminTaskList = make(map[string]Tasker) + stop = make(chan bool) + resume = make(chan bool, 10) +} diff --git a/core/schedule/task_test.go b/core/schedule/task_test.go new file mode 100644 index 0000000..e1f4a84 --- /dev/null +++ b/core/schedule/task_test.go @@ -0,0 +1,49 @@ +package schedule + +import ( + "fmt" + "sync" + "testing" + "time" +) + +func TestParse(t *testing.T) { + tk := NewTask("taska", "0/30 * * * * *", func() error { fmt.Println("hello world"); return nil }) + err := tk.Run() + if err != nil { + t.Fatal(err) + } + AddTask("taska", tk) + StartTask() + time.Sleep(6 * time.Second) + StopTask() +} + +func TestSpec(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(2) + tk1 := NewTask("tk1", "0 12 * * * *", func() error { fmt.Println("tk1"); return nil }) + tk2 := NewTask("tk2", "0,10,20 * * * * *", func() error { fmt.Println("tk2"); wg.Done(); return nil }) + tk3 := NewTask("tk3", "0 10 * * * *", func() error { fmt.Println("tk3"); wg.Done(); return nil }) + + AddTask("tk1", tk1) + AddTask("tk2", tk2) + AddTask("tk3", tk3) + StartTask() + defer StopTask() + + select { + case <-time.After(200 * time.Second): + t.FailNow() + case <-wait(wg): + } +} + +func wait(wg *sync.WaitGroup) chan bool { + ch := make(chan bool) + go func() { + wg.Wait() + ch <- true + }() + return ch +} diff --git a/core/signal/config.go b/core/signal/config.go new file mode 100644 index 0000000..73b0ad7 --- /dev/null +++ b/core/signal/config.go @@ -0,0 +1,31 @@ +package signal + +import ( + "mongo.games.com/goserver/core" +) + +var Config = Configuration{} + +type Configuration struct { + SupportSignal bool +} + +func (c 
*Configuration) Name() string { + return "signal" +} + +func (c *Configuration) Init() error { + if c.SupportSignal { + //demon goroutine + go SignalHandlerModule.ProcessSignal() + } + return nil +} + +func (c *Configuration) Close() error { + return nil +} + +func init() { + core.RegistePackage(&Config) +} diff --git a/core/signal/interrupt_handler.go b/core/signal/interrupt_handler.go new file mode 100644 index 0000000..24e1119 --- /dev/null +++ b/core/signal/interrupt_handler.go @@ -0,0 +1,21 @@ +package signal + +import ( + "os" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/module" +) + +type InterruptSignalHandler struct { +} + +func (ish *InterruptSignalHandler) Process(s os.Signal, ud interface{}) error { + logger.Logger.Warn("Receive Interrupt signal, process start quit.") + module.Stop() + return nil +} + +func init() { + SignalHandlerModule.RegisteHandler(os.Interrupt, &InterruptSignalHandler{}, nil) +} diff --git a/core/signal/kill_handler.go b/core/signal/kill_handler.go new file mode 100644 index 0000000..a5cda94 --- /dev/null +++ b/core/signal/kill_handler.go @@ -0,0 +1,21 @@ +package signal + +import ( + "os" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/module" +) + +type KillSignalHandler struct { +} + +func (ish *KillSignalHandler) Process(s os.Signal, ud interface{}) error { + logger.Logger.Warn("Receive Kill signal, process be close") + module.Stop() + return nil +} + +func init() { + SignalHandlerModule.RegisteHandler(os.Kill, &KillSignalHandler{}, nil) +} diff --git a/core/signal/signal.go b/core/signal/signal.go new file mode 100644 index 0000000..d9e1495 --- /dev/null +++ b/core/signal/signal.go @@ -0,0 +1,106 @@ +// signal +package signal + +import ( + "errors" + "fmt" + "mongo.games.com/goserver/core/utils" + "os" + "os/signal" + "sync" + + "mongo.games.com/goserver/core/logger" +) + +var SignalHandlerModule = NewSignalHandler() + +type Handler interface { + Process(s os.Signal, 
ud interface{}) error +} + +type SignalHandler struct { + lock sync.RWMutex + sc chan os.Signal + mh map[os.Signal]map[Handler]interface{} +} + +func NewSignalHandler() *SignalHandler { + sh := &SignalHandler{ + sc: make(chan os.Signal, 10), + mh: make(map[os.Signal]map[Handler]interface{}), + } + + signal.Notify(sh.sc) + return sh +} + +func (this *SignalHandler) RegisteHandler(s os.Signal, h Handler, ud interface{}) error { + this.lock.Lock() + defer this.lock.Unlock() + if v, ok := this.mh[s]; !ok { + m := make(map[Handler]interface{}) + this.mh[s] = m + m[h] = ud + } else { + if _, has := v[h]; !has { + v[h] = ud + } else { + return errors.New(fmt.Sprintf("SignalHandler.RegisterHandler repeate registe handle %v %v", s, h)) + } + } + + return nil +} + +func (this *SignalHandler) UnregisteHandler(s os.Signal, h Handler) error { + this.lock.Lock() + defer this.lock.Unlock() + if v, ok := this.mh[s]; ok { + if _, has := v[h]; has { + delete(v, h) + } + } + + return nil +} + +func (this *SignalHandler) ClearHandler(s os.Signal) int { + this.lock.Lock() + defer this.lock.Unlock() + if v, ok := this.mh[s]; ok { + cnt := len(v) + delete(this.mh, s) + return cnt + } + return 0 +} + +func (this *SignalHandler) ProcessSignal() { + logger.Logger.Trace("(this *SignalHandler) ProcessSignal()") + for { + select { + case s, ok := <-this.sc: + if !ok { + logger.Logger.Trace("(this *SignalHandler) ProcessSignal() quit!!!") + return + } + //logger.Logger.Warn("-------->receive Signal:", s) + handlers := map[Handler]interface{}{} + this.lock.RLock() + v, ok := this.mh[s] + if ok && len(v) > 0 { + for hk, hv := range v { + handlers[hk] = hv + } + } + this.lock.RUnlock() + if ok && len(handlers) > 0 { + for hk, hv := range handlers { + utils.CatchPanic(func() { hk.Process(s, hv) }) + } + //} else { + // logger.Logger.Warn("-------->UnHandle Signal:", s) + } + } + } +} diff --git a/core/task/command_task_exe.go b/core/task/command_task_exe.go new file mode 100644 index 
0000000..b0d9cde --- /dev/null +++ b/core/task/command_task_exe.go @@ -0,0 +1,23 @@ +package task + +import ( + "mongo.games.com/goserver/core/basic" + "mongo.games.com/goserver/core/utils" +) + +type taskExeCommand struct { + t Task +} + +func (ttc *taskExeCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + defer utils.DumpStackIfPanic("taskExeCommand") + ttc.t.setAfterQueCnt(o.GetPendingCommandCnt()) + return ttc.t.run(o) +} + +// SendTaskExe 将任务发送给一个worker处理 +func SendTaskExe(o *basic.Object, t Task) bool { + t.setBeforeQueCnt(o.GetPendingCommandCnt()) + return o.SendCommand(&taskExeCommand{t: t}, true) +} diff --git a/core/task/command_task_req.go b/core/task/command_task_req.go new file mode 100644 index 0000000..46ac2df --- /dev/null +++ b/core/task/command_task_req.go @@ -0,0 +1,166 @@ +package task + +import ( + "errors" + + "mongo.games.com/goserver/core/basic" + "mongo.games.com/goserver/core/logger" +) + +var ( + TaskErr_CannotFindWorker = errors.New("Cannot find fit worker.") + TaskErr_TaskExecuteObject = errors.New("Task can only be executed executor") +) + +type taskReqCommand struct { + t Task + n string + g string +} + +func (trc *taskReqCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + + var err error + var workerName string + var worker *Worker + if trc.g == "" { + workerName, err = TaskExecutor.c.Get(trc.n) + if err != nil { + logger.Logger.Debug("taskReqCommand done error:", err) + return err + } + worker = TaskExecutor.getWorker(workerName) + } else { + if wg, exist := TaskExecutor.getGroup(trc.g); wg != nil && exist { + workerName, err = wg.c.Get(trc.n) + if err != nil { + logger.Logger.Debug("taskReqCommand done error:", err) + return err + } + worker = wg.getWorker(workerName) + } else { + wg := TaskExecutor.AddGroup(trc.g) + if wg != nil { + workerName, err = wg.c.Get(trc.n) + if err != nil { + logger.Logger.Debug("taskReqCommand done error:", err) + return err + } + worker = wg.getWorker(workerName) + } + } 
+ } + if worker != nil { + logger.Logger.Debug("task[", trc.n, "] dispatch-> worker[", workerName, "]") + ste := SendTaskExe(worker.Object, trc.t) + if ste == true { + logger.Logger.Debug("SendTaskExe success.") + } else { + logger.Logger.Debug("SendTaskExe failed.") + } + return nil + } else { + logger.Logger.Debugf("[%v] worker is no found.", workerName) + return TaskErr_CannotFindWorker + } + +} + +func sendTaskReqToExecutor(t Task, name string, gname string) bool { + if t == nil { + logger.Logger.Debug("sendTaskReqToExecutor error,t is nil") + return false + } + if t.getN() != nil && t.getS() == nil { + logger.Logger.Error(name, " You must specify the source object task.") + return false + } + return TaskExecutor.SendCommand(&taskReqCommand{t: t, n: name, g: gname}, true) +} + +type fixTaskReqCommand struct { + t Task + n string + g string +} + +func (trc *fixTaskReqCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + + var worker *Worker + if trc.g == "" { + worker = TaskExecutor.getFixWorker(trc.n) + if worker == nil { + worker = TaskExecutor.addFixWorker(trc.n) + } + } else { + if wg, ok := TaskExecutor.getGroup(trc.g); ok && wg != nil { + worker = wg.getFixWorker(trc.n) + if worker == nil { + worker = wg.addFixWorker(trc.n) + } + } else { + wg := TaskExecutor.AddGroup(trc.g) + if wg != nil { + worker = wg.getFixWorker(trc.n) + if worker == nil { + worker = wg.addFixWorker(trc.n) + } + } + } + } + + if worker != nil { + logger.Logger.Debug("task[", trc.n, "] dispatch-> worker[", trc.n, "]") + ste := SendTaskExe(worker.Object, trc.t) + if ste == true { + logger.Logger.Debug("SendTaskExe success.") + } else { + logger.Logger.Debug("SendTaskExe failed.") + } + return nil + } else { + logger.Logger.Debugf("[%v] worker is no found.", trc.n) + return TaskErr_CannotFindWorker + } +} + +func sendTaskReqToFixExecutor(t Task, name, gname string) bool { + if t == nil { + logger.Logger.Warn("sendTaskReqToExecutor error,t is nil") + return false + } + if 
t.getN() != nil && t.getS() == nil { + logger.Logger.Error(name, " You must specify the source object task.") + return false + } + return TaskExecutor.SendCommand(&fixTaskReqCommand{t: t, n: name, g: gname}, true) +} + +type broadcastTaskReqCommand struct { + t Task +} + +func (trc *broadcastTaskReqCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + + trc.t.AddRefCnt(int32(len(TaskExecutor.workers))) + for name, worker := range TaskExecutor.workers { + //copy + t := trc.t.clone(name) + if t != nil { + //logger.Logger.Trace("task[", t.name, "] dispatch-> worker[", name, "]") + SendTaskExe(worker.Object, t) + } + } + return nil +} + +func sendTaskReqToAllExecutor(t Task) bool { + if t == nil { + logger.Logger.Warn("sendTaskReqToExecutor error,t is nil") + return false + } + return TaskExecutor.SendCommand(&broadcastTaskReqCommand{t: t}, true) +} diff --git a/core/task/command_task_res.go b/core/task/command_task_res.go new file mode 100644 index 0000000..69569ac --- /dev/null +++ b/core/task/command_task_res.go @@ -0,0 +1,26 @@ +package task + +import ( + "mongo.games.com/goserver/core/basic" + "mongo.games.com/goserver/core/utils" +) + +type taskResCommand struct { + t Task + n CompleteNotify +} + +func (trc *taskResCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + defer utils.DumpStackIfPanic("taskExeCommand") + trc.t.done(trc.n) + return nil +} + +// SendTaskRes 将任务回调方法发送给一个节点处理 +func SendTaskRes(o *basic.Object, t Task, n CompleteNotify) bool { + if o == nil { + return false + } + return o.SendCommand(&taskResCommand{t: t, n: n}, true) +} diff --git a/core/task/config.go b/core/task/config.go new file mode 100644 index 0000000..6c4c6d7 --- /dev/null +++ b/core/task/config.go @@ -0,0 +1,50 @@ +package task + +import ( + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/basic" +) + +var Config = Configuration{} + +type WorkerConfig struct { + Options basic.Options + WorkerCnt int +} + +type Configuration struct { + 
// Init backfills default sizing for the executor object and its worker
// objects, then starts the global TaskExecutor. Zero or negative values
// in the loaded configuration are treated as "unset".
func (c *Configuration) Init() error {
	// Executor command-queue defaults.
	if c.Options.QueueBacklog <= 0 {
		c.Options.QueueBacklog = 1024
	}
	if c.Options.MaxDone <= 0 {
		c.Options.MaxDone = 1024
	}
	// Per-worker command-queue defaults.
	if c.Worker.Options.QueueBacklog <= 0 {
		c.Worker.Options.QueueBacklog = 1024
	}
	if c.Worker.Options.MaxDone <= 0 {
		c.Worker.Options.MaxDone = 1024
	}
	// Default size of the hashed worker pool.
	if c.Worker.WorkerCnt <= 0 {
		c.Worker.WorkerCnt = 8
	}
	TaskExecutor.Start()
	return nil
}
core.LaunchChild(TaskExecutor.Object) +} + +func (e *Executor) addWorker(workerCnt int) { + for i := 0; i < workerCnt; i++ { + id := atomic.AddInt32(&WorkerIdGenerator, 1) + w := &Worker{ + Object: basic.NewObject(int(id), + fmt.Sprintf("worker_%d", id), + Config.Worker.Options, + nil), + } + + w.UserData = w + e.LaunchChild(w.Object) + e.c.Add(w.Name) + e.workers[w.Name] = w + } +} + +func (e *Executor) getWorker(name string) *Worker { + if w, exist := e.workers[name]; exist { + return w + } + return nil +} + +func (e *Executor) getFixWorker(name string) *Worker { + if w, exist := e.fixWorkers[name]; exist { + return w + } + return nil +} + +func (e *Executor) addFixWorker(name string) *Worker { + logger.Logger.Infof("Executor.AddFixWorker(%v)", name) + id := atomic.AddInt32(&WorkerIdGenerator, 1) + w := &Worker{ + Object: basic.NewObject(int(id), + name, + Config.Worker.Options, + nil), + } + + w.UserData = w + e.LaunchChild(w.Object) + e.fixWorkers[name] = w + return w +} + +func (e *Executor) getGroup(gname string) (*WorkerGroup, bool) { + wg, ok := e.group[gname] + return wg, ok +} + +func (e *Executor) AddGroup(gname string) *WorkerGroup { + wg := &WorkerGroup{ + e: e, + c: consistent.New(), + name: gname, + workers: make(map[string]*Worker), + fixWorkers: make(map[string]*Worker), + } + + for i := 0; i < Config.Worker.WorkerCnt; i++ { + id := atomic.AddInt32(&WorkerIdGenerator, 1) + w := &Worker{ + Object: basic.NewObject(int(id), + fmt.Sprintf("g_%v_worker_%d", gname, id), + Config.Worker.Options, + nil), + } + + w.UserData = w + e.LaunchChild(w.Object) + wg.c.Add(w.Name) + wg.workers[w.Name] = w + } + + e.group[gname] = wg + return wg +} + +func (wg *WorkerGroup) getWorker(name string) *Worker { + if w, exist := wg.workers[name]; exist { + return w + } + return nil +} + +func (wg *WorkerGroup) getFixWorker(name string) *Worker { + if w, exist := wg.fixWorkers[name]; exist { + return w + } + return nil +} + +func (wg *WorkerGroup) addFixWorker(name string) 
// CallableWrapper adapts a plain function to the Callable interface, so
// callers can pass closures instead of defining a type.
type CallableWrapper func(o *basic.Object) interface{}

// Call invokes the wrapped function with the executing object.
func (cw CallableWrapper) Call(o *basic.Object) interface{} {
	return cw(o)
}

// CompleteNotifyWrapper adapts a plain function to the CompleteNotify
// interface for completion callbacks.
type CompleteNotifyWrapper func(interface{}, Task)

// Done invokes the wrapped function with the task result and the task.
func (cnw CompleteNotifyWrapper) Done(i interface{}, t Task) {
	cnw(i, t)
}
baseTask struct { + imp Task + s *basic.Object + c Callable + n CompleteNotify + r chan interface{} + v interface{} + env *container.SynchronizedMap + tCreate time.Time + tStart time.Time + alertTime time.Duration + name string + refTaskCnt int32 + beforeQueCnt int //入队列前,等待中的任务数量 + afterQueCnt int //出队列后,等待中的任务数量 +} + +func New(s *basic.Object, c Callable, n CompleteNotify, name ...string) Task { + return newBaseTask(s, c, n, name...) +} + +func newBaseTask(s *basic.Object, c Callable, n CompleteNotify, name ...string) *baseTask { + t := &baseTask{ + s: s, + c: c, + n: n, + r: make(chan interface{}, 1), + tCreate: time.Now(), + } + if len(name) != 0 { + t.name = name[0] + } + if s == nil { + t.s = core.CoreObject() + } + t.imp = t + return t +} + +func (t *baseTask) clone(name string) Task { + fullname := t.name + if name != "" { + fullname += "-" + name + } + return New(t.s, t.c, t.n, fullname) +} + +func (t *baseTask) setAfterQueCnt(n int) { + t.afterQueCnt = n +} + +func (t *baseTask) setBeforeQueCnt(n int) { + t.beforeQueCnt = n +} + +func (t *baseTask) getS() *basic.Object { + return t.s +} + +func (t *baseTask) getC() Callable { + return t.c +} + +func (t *baseTask) getN() CompleteNotify { + return t.n +} + +func (t *baseTask) AddRefCnt(cnt int32) int32 { + return atomic.AddInt32(&t.refTaskCnt, cnt) +} + +func (t *baseTask) GetRefCnt() int32 { + return atomic.LoadInt32(&t.refTaskCnt) +} + +func (t *baseTask) Get() interface{} { + if t.n != nil { + panic("Task result by CompleteNotify return") + } + + return <-t.r +} + +func (t *baseTask) GetWithTimeout(timeout time.Duration) interface{} { + if timeout == 0 { + return t.Get() + } else { + timer := recycler.GetTimer(timeout) + defer recycler.GiveTimer(timer) + select { + case r, ok := <-t.r: + if ok { + return r + } else { + return nil + } + case <-timer.C: + return nil + } + } + return nil +} + +func (t *baseTask) GetEnv(k interface{}) interface{} { + if t.env == nil { + return nil + } + return t.env.Get(k) 
+} + +func (t *baseTask) PutEnv(k, v interface{}) bool { + if t.env == nil { + t.env = container.NewSynchronizedMap() + } + if t.env != nil { + t.env.Set(k, v) + } + + return true +} + +func (t *baseTask) run(o *basic.Object) (e error) { + watch := profile.TimeStatisticMgr.WatchStart(fmt.Sprintf("/task/%v/run", t.name), profile.TIME_ELEMENT_TASK) + defer func() { + if watch != nil { + watch.Stop() + } + + if err := recover(); err != nil { + var buf [4096]byte + n := runtime.Stack(buf[:], false) + logger.Logger.Error("Task::run stack--->", string(buf[:n])) + } + }() + + t.tStart = time.Now() + wait := t.tStart.Sub(t.tCreate) + t.v = t.c.Call(o) + dura := t.GetRunTime() + + if t.r != nil { + t.r <- t.v + } + + t.imp.sendRsp() + + if t.alertTime != 0 && t.name != "" { + cost := t.GetCostTime() + if cost > t.alertTime { + logger.Logger.Warn("task [", t.name, "] since createTime(", + cost, ") since startTime(", dura, "), in quene wait(", wait, ")", " beforeQueCnt(", t.beforeQueCnt, ") afterQueCnt(", t.afterQueCnt, ")") + } + } + return nil +} + +func (t *baseTask) done(n CompleteNotify) { + if n != nil { + n.Done(t.v, t) + } +} + +func (t *baseTask) sendRsp() { + if t.n != nil { + SendTaskRes(t.s, t, t.n) + } +} + +// Start 启动独立的一个协程,相当于 go 关键字 +func (t *baseTask) Start() { + go t.imp.run(nil) +} + +func (t *baseTask) SetAlertTime(alertt time.Duration) { + t.alertTime = alertt +} + +func (t *baseTask) GetCostTime() time.Duration { + return time.Now().Sub(t.tCreate) +} + +func (t *baseTask) GetRunTime() time.Duration { + return time.Now().Sub(t.tStart) +} + +// StartByExecutor 根据名称的哈希值选择一个协程,在协程中执行(框架启动时默认会创建几个协程) +func (t *baseTask) StartByExecutor(name string) bool { + return sendTaskReqToExecutor(t, name, "") +} + +// StartByFixExecutor 根据名称创建一个协程,如果协程已经存在,相同名称的任务会在同一个协程中执行 +func (t *baseTask) StartByFixExecutor(name string) bool { + return sendTaskReqToFixExecutor(t, name, "") +} + +func (t *baseTask) BroadcastToAllExecutor() bool { + return 
sendTaskReqToAllExecutor(t) +} + +// StartByGroupExecutor 在 StartByExecutor 前根据gname分组 +func (t *baseTask) StartByGroupExecutor(gname string, name string) bool { + return sendTaskReqToExecutor(t, name, gname) +} + +// StartByGroupFixExecutor 在 StartByFixExecutor 前根据gname分组 +func (t *baseTask) StartByGroupFixExecutor(name, gname string) bool { + return sendTaskReqToFixExecutor(t, name, gname) +} diff --git a/core/task/task_mutex.go b/core/task/task_mutex.go new file mode 100644 index 0000000..f53c984 --- /dev/null +++ b/core/task/task_mutex.go @@ -0,0 +1,59 @@ +package task + +import ( + "errors" + "sync" + "sync/atomic" + + "mongo.games.com/goserver/core/basic" +) + +var taskMutexLock sync.Mutex +var taskMutexPool = make(map[string]Task) +var ErrTaskIsRunning = errors.New("mutex task is running") + +// 互斥任务,相同key的任务,只有一个CompleteNotify,后边再触发的自动忽略,例如:客户端多次点击导致的请求,只有第一次给反馈 +type mutexTask struct { + *baseTask + running int32 //是否正在运行 + mutexKey string //互斥任务key +} + +func NewMutexTask(s *basic.Object, c Callable, n CompleteNotify, key, name string) (t Task, done bool) { + mutexKey := name + key + taskMutexLock.Lock() + if t, ok := taskMutexPool[mutexKey]; ok { + taskMutexLock.Unlock() + return t, true + } + + base := newBaseTask(s, c, n, name) + t = &mutexTask{ + baseTask: base, + mutexKey: mutexKey, + } + base.imp = t + taskMutexPool[mutexKey] = t + taskMutexLock.Unlock() + return t, false +} + +// 不支持 +func (t *mutexTask) clone(name string) Task { + return nil +} + +func (t *mutexTask) run(o *basic.Object) (e error) { + // process mutex task + if !atomic.CompareAndSwapInt32(&t.running, 0, 1) { + return ErrTaskIsRunning + } + + e = t.baseTask.run(o) + + taskMutexLock.Lock() + delete(taskMutexPool, t.mutexKey) + taskMutexLock.Unlock() + + return nil +} diff --git a/core/task/task_share.go b/core/task/task_share.go new file mode 100644 index 0000000..900e26d --- /dev/null +++ b/core/task/task_share.go @@ -0,0 +1,88 @@ +package task + +import ( + 
"mongo.games.com/goserver/core/basic" + "sync" + "sync/atomic" +) + +var taskShareLock sync.Mutex +var taskSharePool = make(map[string]*shareTask) + +// 共享任务,多次请求共享一个Callable;返回多个CompleteNotify;例如:多个用户查询同一份榜单数据,避免缓存击穿 +type shareTaskNotify struct { + s *basic.Object + n CompleteNotify +} + +type shareTask struct { + *baseTask + sync.RWMutex + notifies []*shareTaskNotify + running int32 //是否正在运行 + shareKey string //共享任务key +} + +func RunShareTask(s *basic.Object, c Callable, n CompleteNotify, key, name string) (t Task, done bool) { + mutexKey := name + key + taskShareLock.Lock() + if t, ok := taskSharePool[mutexKey]; ok { + taskShareLock.Unlock() + if t.v != nil { + SendTaskRes(t.s, t, t.n) + } else { + t.Lock() + t.notifies = append(t.notifies, &shareTaskNotify{s: s, n: n}) + t.Unlock() + } + return t, true + } + + bt := newBaseTask(s, c, n, name) + st := &shareTask{ + baseTask: bt, + shareKey: mutexKey, + } + t = st + bt.imp = t + taskSharePool[mutexKey] = st + taskShareLock.Unlock() + + go st.run(nil) + + return t, false +} + +// 不支持 +func (t *shareTask) clone(name string) Task { + return nil +} + +func (t *shareTask) run(o *basic.Object) (e error) { + // process mutex task + if !atomic.CompareAndSwapInt32(&t.running, 0, 1) { + return ErrTaskIsRunning + } + + e = t.baseTask.run(o) + + taskShareLock.Lock() + delete(taskSharePool, t.shareKey) + taskShareLock.Unlock() + + return nil +} + +func (t *shareTask) sendRsp() { + if t.n != nil { + SendTaskRes(t.s, t, t.n) + } + + if len(t.notifies) != 0 { + t.RLock() + defer t.RUnlock() + for _, s := range t.notifies { + SendTaskRes(s.s, t, s.n) + } + } +} diff --git a/core/task/worker.go b/core/task/worker.go new file mode 100644 index 0000000..6d9f481 --- /dev/null +++ b/core/task/worker.go @@ -0,0 +1,9 @@ +package task + +import ( + "mongo.games.com/goserver/core/basic" +) + +type Worker struct { + *basic.Object +} diff --git a/core/timer/command_start_timer.go b/core/timer/command_start_timer.go new file mode 100644 
index 0000000..aa032e1 --- /dev/null +++ b/core/timer/command_start_timer.go @@ -0,0 +1,62 @@ +package timer + +import ( + "container/heap" + "time" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/basic" +) + +type startTimerCommand struct { + src *basic.Object + ta TimerAction + ud interface{} + interval time.Duration + times int + h TimerHandle +} + +func (stc *startTimerCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + + te := &TimerEntity{ + sink: stc.src, + ud: stc.ud, + ta: stc.ta, + interval: stc.interval, + times: stc.times, + h: stc.h, + next: time.Now().Add(stc.interval), + } + + heap.Push(TimerModule.tq, te) + + return nil +} + +// StartTimer only can be called in main module +func StartTimer(ta TimerAction, ud interface{}, interval time.Duration, times int) (TimerHandle, bool) { + return StartTimerByObject(core.CoreObject(), ta, ud, interval, times) +} +func AfterTimer(taw TimerActionWrapper, ud interface{}, interval time.Duration) (TimerHandle, bool) { + var tac = &TimerActionCommon{ + Taw: taw, + } + return StartTimerByObject(core.CoreObject(), tac, ud, interval, 1) +} + +func StartTimerByObject(src *basic.Object, ta TimerAction, ud interface{}, interval time.Duration, times int) (TimerHandle, bool) { + h := generateTimerHandle() + ret := TimerModule.SendCommand( + &startTimerCommand{ + src: src, + ta: ta, + ud: ud, + interval: interval, + times: times, + h: h, + }, + true) + return h, ret +} diff --git a/core/timer/command_stop_timer.go b/core/timer/command_stop_timer.go new file mode 100644 index 0000000..4d6cafe --- /dev/null +++ b/core/timer/command_stop_timer.go @@ -0,0 +1,25 @@ +package timer + +import ( + "container/heap" + + "mongo.games.com/goserver/core/basic" +) + +type stopTimerCommand struct { + h TimerHandle +} + +func (stc *stopTimerCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + + if v, ok := TimerModule.tq.ref[stc.h]; ok { + heap.Remove(TimerModule.tq, v) + } + + return nil +} + 
+func StopTimer(h TimerHandle) bool { + return TimerModule.SendCommand(&stopTimerCommand{h: h}, true) +} diff --git a/core/timer/command_timeout_timer.go b/core/timer/command_timeout_timer.go new file mode 100644 index 0000000..490ec37 --- /dev/null +++ b/core/timer/command_timeout_timer.go @@ -0,0 +1,41 @@ +package timer + +import ( + "fmt" + "mongo.games.com/goserver/core/basic" + "mongo.games.com/goserver/core/profile" + "reflect" +) + +type timeoutCommand struct { + te *TimerEntity +} + +func (tc *timeoutCommand) Done(o *basic.Object) error { + tta := reflect.TypeOf(tc.te.ta) + watch := profile.TimeStatisticMgr.WatchStart(fmt.Sprintf("/timer/%v/ontimer", tta.Name()), profile.TIME_ELEMENT_TIMER) + defer func() { + o.ProcessSeqnum() + if watch != nil { + watch.Stop() + } + }() + if tc.te.stoped { + return nil + } + if tc.te.ta.OnTimer(tc.te.h, tc.te.ud) == false { + tc.te.stoped = true + if tc.te.times < 0 { + StopTimer(tc.te.h) + } + } + return nil +} + +func SendTimeout(te *TimerEntity) bool { + if te.sink == nil { + return false + } + + return te.sink.SendCommand(&timeoutCommand{te: te}, true) +} diff --git a/core/timer/config.go b/core/timer/config.go new file mode 100644 index 0000000..01dca14 --- /dev/null +++ b/core/timer/config.go @@ -0,0 +1,42 @@ +package timer + +import ( + "time" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/basic" +) + +var Config = Configuration{} + +type Configuration struct { + Options basic.Options +} + +func (c *Configuration) Name() string { + return "timer" +} + +func (c *Configuration) Init() error { + if c.Options.QueueBacklog <= 0 { + c.Options.QueueBacklog = 1024 + } + if c.Options.MaxDone <= 0 { + c.Options.MaxDone = 1024 + } + if c.Options.Interval <= 0 { + c.Options.Interval = time.Millisecond * 10 + } else { + c.Options.Interval = time.Millisecond * c.Options.Interval + } + TimerModule.Start() + return nil +} + +func (c *Configuration) Close() error { + return nil +} + +func init() { + 
// OnTick drains every timer whose deadline has passed, dispatching a
// timeout command to each timer's sink object. It returns as soon as the
// head of the queue is not yet due (the queue is ordered by next fire
// time), so each tick does work proportional to the number of expired
// timers only.
func (tm *TimerMgr) OnTick() {
	nowTime := time.Now()
	for {
		if tm.tq.Len() > 0 {
			t := heap.Pop(tm.tq)
			if te, ok := t.(*TimerEntity); ok {
				if !te.stoped && te.next.Before(nowTime) {
					// Count down finite repetitions; negative times means
					// "repeat forever" and is left untouched.
					if te.times > 0 {
						te.times--
					}
					//Avoid async stop timer failed
					// times != 0 covers both "runs remaining" and the
					// repeat-forever convention, so the entity is
					// rescheduled before dispatch.
					if te.times != 0 {
						te.next = te.next.Add(te.interval)
						heap.Push(tm.tq, te)
					}
					// If the sink rejects the command (e.g. it has shut
					// down), remove the rescheduled entry via its handle.
					if !SendTimeout(te) {
						if v, ok := tm.tq.ref[te.h]; ok {
							heap.Remove(tm.tq, v)
						}
					}
				} else {
					// Head not yet due: push it back and stop scanning.
					// A stopped head is deliberately not re-queued, which
					// is how lazily cancelled timers are reaped.
					if !te.stoped {
						heap.Push(tm.tq, te)
					}
					return
				}
			}
		} else {
			return
		}
	}
}
OnTimer(h TimerHandle, ud interface{}) bool { + return taw(h, ud) +} + +type TimerActionCommon struct { + Taw TimerActionWrapper +} + +func (this TimerActionCommon) OnTimer(h TimerHandle, ud interface{}) bool { + return this.Taw(h, ud) +} diff --git a/core/timer/timer_queue.go b/core/timer/timer_queue.go new file mode 100644 index 0000000..e6d8752 --- /dev/null +++ b/core/timer/timer_queue.go @@ -0,0 +1,66 @@ +package timer + +import ( + "container/heap" + "sync/atomic" + "time" + + "mongo.games.com/goserver/core/basic" +) + +type TimerEntity struct { + sink *basic.Object + ud interface{} + interval time.Duration + next time.Time + times int + ta TimerAction + h TimerHandle + stoped bool +} + +type TimerQueue struct { + queue []*TimerEntity + ref map[TimerHandle]int +} + +func generateTimerHandle() TimerHandle { + return TimerHandle(atomic.AddUint32(&TimerHandleGenerator, 1)) +} + +func NewTimerQueue() *TimerQueue { + tq := &TimerQueue{ + ref: make(map[TimerHandle]int), + } + heap.Init(tq) + return tq +} +func (tq TimerQueue) Len() int { + return len(tq.queue) +} + +func (tq TimerQueue) Less(i, j int) bool { + return tq.queue[i].next.Before(tq.queue[j].next) +} + +func (tq *TimerQueue) Swap(i, j int) { + tq.queue[i], tq.queue[j] = tq.queue[j], tq.queue[i] + tq.ref[tq.queue[i].h] = i + tq.ref[tq.queue[j].h] = j +} + +func (tq *TimerQueue) Push(x interface{}) { + n := len(tq.queue) + te := x.(*TimerEntity) + tq.ref[te.h] = n + tq.queue = append(tq.queue, te) +} + +func (tq *TimerQueue) Pop() interface{} { + old := tq.queue + n := len(old) + te := old[n-1] + delete(tq.ref, te.h) + tq.queue = old[0 : n-1] + return te +} diff --git a/core/timer/timer_queue_test.go b/core/timer/timer_queue_test.go new file mode 100644 index 0000000..8fd8581 --- /dev/null +++ b/core/timer/timer_queue_test.go @@ -0,0 +1,88 @@ +package timer + +import ( + "container/heap" + "testing" + "time" +) + +func TestTimerQueuePush(t *testing.T) { + tq := NewTimerQueue() + tNow := time.Now() + te1 := 
&TimerEntity{ + ud: int(2), + next: tNow.Add(time.Minute), + } + te2 := &TimerEntity{ + ud: int(1), + next: tNow.Add(time.Second), + } + te3 := &TimerEntity{ + ud: int(3), + next: tNow.Add(time.Hour), + } + heap.Push(tq, te2) + heap.Push(tq, te1) + heap.Push(tq, te3) + + if tq.Len() != 3 { + t.Fatal("Timer Queue Size error") + } + var ( + tee interface{} + te *TimerEntity + ok bool + ) + tee = heap.Pop(tq) + if te, ok = tee.(*TimerEntity); ok { + if te.ud.(int) != 1 { + t.Fatal("First Must 1.") + } + } + + tee = heap.Pop(tq) + if te, ok = tee.(*TimerEntity); ok { + if te.ud.(int) != 2 { + t.Fatal("Second Must 2.") + } + } + + tee = heap.Pop(tq) + if te, ok = tee.(*TimerEntity); ok { + if te.ud.(int) != 3 { + t.Fatal("Third Must 3.") + } + } +} + +func BenchmarkTimerQueuePush(b *testing.B) { + tq := NewTimerQueue() + b.StartTimer() + for i := 0; i < b.N; i++ { + h := generateTimerHandle() + te := &TimerEntity{ + h: h, + } + tq.Push(te) + } + b.StopTimer() +} + +func BenchmarkTimerQueuePop(b *testing.B) { + tq := NewTimerQueue() + + for i := 0; i < b.N; i++ { + h := generateTimerHandle() + te := &TimerEntity{ + h: h, + } + tq.Push(te) + } + + b.StartTimer() + for i := 0; i < b.N; i++ { + te := tq.Pop() + tq.Push(te) + } + b.StopTimer() +} diff --git a/core/transact/command_trans_resume.go b/core/transact/command_trans_resume.go new file mode 100644 index 0000000..9d37da9 --- /dev/null +++ b/core/transact/command_trans_resume.go @@ -0,0 +1,19 @@ +package transact + +import ( + "mongo.games.com/goserver/core/basic" +) + +type transactResumeCommand struct { + tnode *TransNode +} + +func (trc *transactResumeCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + trc.tnode.checkExeOver() + return nil +} + +func SendTranscatResume(tnode *TransNode) bool { + return tnode.ownerObj.SendCommand(&transactResumeCommand{tnode: tnode}, true) +} diff --git a/core/transact/command_trans_yield.go b/core/transact/command_trans_yield.go new file mode 100644 index 
0000000..814ee19 --- /dev/null +++ b/core/transact/command_trans_yield.go @@ -0,0 +1,19 @@ +package transact + +import ( + "mongo.games.com/goserver/core/basic" +) + +type transactYieldCommand struct { + tnode *TransNode +} + +func (trc *transactYieldCommand) Done(o *basic.Object) error { + defer o.ProcessSeqnum() + trc.tnode.checkExeOver() + return nil +} + +func SendTranscatYield(tnode *TransNode) bool { + return tnode.ownerObj.SendCommand(&transactYieldCommand{tnode: tnode}, true) +} diff --git a/core/transact/config.go b/core/transact/config.go new file mode 100644 index 0000000..e872e74 --- /dev/null +++ b/core/transact/config.go @@ -0,0 +1,36 @@ +// config +package transact + +import ( + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/logger" +) + +var Config = Configuration{} + +type Configuration struct { + TxSkeletonName string + tcs TransactCommSkeleton +} + +func (this *Configuration) Name() string { + return "tx" +} + +func (this *Configuration) Init() error { + if this.TxSkeletonName != "" { + this.tcs = GetTxCommSkeleton(this.TxSkeletonName) + if this.tcs == nil { + logger.Logger.Warnf("%v TxSkeletonName not registed!!!", this.TxSkeletonName) + } + } + return nil +} + +func (this *Configuration) Close() error { + return nil +} + +func init() { + core.RegistePackage(&Config) +} diff --git a/core/transact/doc.go b/core/transact/doc.go new file mode 100644 index 0000000..1aa7025 --- /dev/null +++ b/core/transact/doc.go @@ -0,0 +1,3 @@ +package transact + +// 2pc diff --git a/core/transact/transcommitpolicy.go b/core/transact/transcommitpolicy.go new file mode 100644 index 0000000..cdd836e --- /dev/null +++ b/core/transact/transcommitpolicy.go @@ -0,0 +1,8 @@ +package transact + +const ( + TransactCommitPolicy_SelfDecide TransactCommitPolicy = iota + TransactCommitPolicy_TwoPhase +) + +type TransactCommitPolicy int diff --git a/core/transact/transcommskeleton.go b/core/transact/transcommskeleton.go new file mode 100644 index 
0000000..67b98a3 --- /dev/null +++ b/core/transact/transcommskeleton.go @@ -0,0 +1,26 @@ +// transcommskeleton +package transact + +var txSkeletons = make(map[string]TransactCommSkeleton) + +type TransactCommSkeleton interface { + SendTransResult(parent, me *TransNodeParam, tr *TransResult) bool + SendTransStart(parent, me *TransNodeParam, ud interface{}) bool + SendCmdToTransNode(tnp *TransNodeParam, cmd TransCmd) bool + GetSkeletonID() int + GetAreaID() int +} + +func RegisteTxCommSkeleton(name string, tcs TransactCommSkeleton) { + if _, exist := txSkeletons[name]; exist { + panic("repeate registe TxCommSkeleton:" + name) + } + txSkeletons[name] = tcs +} + +func GetTxCommSkeleton(name string) TransactCommSkeleton { + if t, exist := txSkeletons[name]; exist { + return t + } + return nil +} diff --git a/core/transact/transcoordinator.go b/core/transact/transcoordinator.go new file mode 100644 index 0000000..b27571b --- /dev/null +++ b/core/transact/transcoordinator.go @@ -0,0 +1,224 @@ +// distributed transcation coordinater +package transact + +import ( + "sync" + "time" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/module" + "mongo.games.com/goserver/core/timer" + "mongo.games.com/goserver/core/utils" +) + +var ( + DTCModule = &transactCoordinater{transPool: make(map[TransNodeID]*TransNode)} + tta = &transactTimerAction{} +) + +type transactCoordinater struct { + idGen utils.AtomicIdGen + lock sync.Mutex + transPool map[TransNodeID]*TransNode + quit bool + reaped bool +} + +func (this *transactCoordinater) ModuleName() string { + return module.ModuleName_Transact +} + +func (this *transactCoordinater) Init() { +} + +func (this *transactCoordinater) Update() { +} + +func (this *transactCoordinater) Shutdown() { + if this.quit { + return + } + + this.quit = true + + if len(this.transPool) > 0 { + go this.reapRoutine() + return + } else { + this.destroy() + } +} + +func (this *transactCoordinater) 
reapRoutine() { + if this.reaped { + return + } + + this.reaped = true + + for len(this.transPool) > 0 { + time.Sleep(time.Second) + } + + this.destroy() +} + +func (this *transactCoordinater) destroy() { + module.UnregisteModule(this) +} + +func (this *transactCoordinater) releaseTrans(tnode *TransNode) { + if this == nil || tnode == nil { + return + } + timer.StopTimer(tnode.timeHandle) + this.delTransNode(tnode) +} + +func (this *transactCoordinater) spawnTransNodeID() TransNodeID { + tid := int64(this.idGen.NextId()) + if Config.tcs != nil { + tid = int64(Config.tcs.GetAreaID())<<48 | int64(Config.tcs.GetSkeletonID())<<32 | tid + } + return TransNodeID(tid) +} + +func (this *transactCoordinater) createTransNode(tnp *TransNodeParam, ud interface{}, timeout time.Duration) *TransNode { + if this == nil || tnp == nil { + logger.Logger.Warn("transactCoordinater.createTransNode failed, Null Pointer") + return nil + } + if this.quit { + logger.Logger.Warn("transactCoordinater.createTransNode failed, module shutdowning") + return nil + } + transHandler := GetHandler(tnp.Tt) + if transHandler == nil { + logger.Logger.Warnf("transactCoordinater.createTransNode failed, TransNodeParam=%v", *tnp) + return nil + } + + if tnp.TId == TransNodeIDNil { + tnp.TId = this.spawnTransNodeID() + } + + if Config.tcs != nil { + tnp.SkeletonID = Config.tcs.GetSkeletonID() + tnp.AreaID = Config.tcs.GetAreaID() + } + tnp.TimeOut = timeout + tnp.ExpiresTs = time.Now().Add(timeout).UnixNano() + tnode := &TransNode{ + MyTnp: tnp, + handler: transHandler, + owner: this, + TransRep: &TransResult{}, + TransEnv: NewTransCtx(), + ud: ud, + createTime: time.Now(), + } + + this.addTransNode(tnode) + + if h, ok := timer.StartTimer(tta, tnode, tnp.TimeOut, 1); ok { + tnode.timeHandle = h + } else { + return nil + } + return tnode +} + +func (this *transactCoordinater) StartTrans(tnp *TransNodeParam, ud interface{}, timeout time.Duration) *TransNode { + if this.quit { + return nil + } + tnode := 
this.createTransNode(tnp, ud, timeout) + if tnode == nil { + return nil + } + return tnode +} + +func (this *transactCoordinater) ProcessTransResult(tid, childtid TransNodeID, retCode int, ud interface{}) bool { + tnode := this.getTransNode(tid) + if tnode == nil { + return false + } + ret := tnode.childTransRep(childtid, retCode, ud) + if ret != TransExeResult_Success { + return false + } + return true +} + +func (this *transactCoordinater) ProcessTransStart(parentTnp, myTnp *TransNodeParam, ud interface{}, timeout time.Duration) bool { + if this.quit { + logger.Logger.Warn("transactCoordinater.processTransStart find shutdowning, parent=", parentTnp, " selfparam=", myTnp) + return false + } + tnode := this.createTransNode(myTnp, ud, timeout) + if tnode == nil { + return false + } + + tnode.ParentTnp = parentTnp + tnode.ownerObj = core.CoreObject() + ret := tnode.execute(ud) + if ret != TransExeResult_Success { + return false + } + return true +} + +func (this *transactCoordinater) ProcessTransCmd(tid TransNodeID, cmd TransCmd) bool { + tnode := this.getTransNode(tid) + if tnode == nil { + return false + } + + switch cmd { + case TransCmd_Commit: + tnode.commit() + case TransCmd_RollBack: + tnode.rollback(TransNodeIDNil) + } + return true +} + +func (this *transactCoordinater) getTransNode(tid TransNodeID) *TransNode { + this.lock.Lock() + defer this.lock.Unlock() + if v, exist := this.transPool[tid]; exist { + return v + } + return nil +} + +func (this *transactCoordinater) addTransNode(tnode *TransNode) { + this.lock.Lock() + defer this.lock.Unlock() + this.transPool[tnode.MyTnp.TId] = tnode +} + +func (this *transactCoordinater) delTransNode(tnode *TransNode) { + this.lock.Lock() + defer this.lock.Unlock() + delete(this.transPool, tnode.MyTnp.TId) +} + +func init() { + module.RegisteModule(DTCModule, time.Hour, 1) +} + +func ProcessTransResult(tid, childtid TransNodeID, retCode int, ud interface{}) bool { + return DTCModule.ProcessTransResult(tid, childtid, 
retCode, ud) +} + +func ProcessTransStart(parentTnp, myTnp *TransNodeParam, ud interface{}, timeout time.Duration) bool { + return DTCModule.ProcessTransStart(parentTnp, myTnp, ud, timeout) +} + +func ProcessTransCmd(tid TransNodeID, cmd TransCmd) bool { + return DTCModule.ProcessTransCmd(tid, cmd) +} diff --git a/core/transact/transctx.go b/core/transact/transctx.go new file mode 100644 index 0000000..a0f056a --- /dev/null +++ b/core/transact/transctx.go @@ -0,0 +1,39 @@ +// transctx +package transact + +import ( + "sync" +) + +type TransCtx struct { + fields map[interface{}]interface{} + lock *sync.RWMutex +} + +func NewTransCtx() *TransCtx { + tc := &TransCtx{ + lock: new(sync.RWMutex), + } + return tc +} + +func (this *TransCtx) SetField(k, v interface{}) { + this.lock.Lock() + if this.fields == nil { + this.fields = make(map[interface{}]interface{}) + } + this.fields[k] = v + this.lock.Unlock() +} + +func (this *TransCtx) GetField(k interface{}) interface{} { + this.lock.RLock() + if this.fields != nil { + if v, exist := this.fields[k]; exist { + this.lock.RUnlock() + return v + } + } + this.lock.RUnlock() + return nil +} diff --git a/core/transact/transfactory.go b/core/transact/transfactory.go new file mode 100644 index 0000000..ea84eae --- /dev/null +++ b/core/transact/transfactory.go @@ -0,0 +1,25 @@ +package transact + +import ( + "fmt" + + "mongo.games.com/goserver/core/logger" +) + +var transactionHandlerPool = make(map[TransType]TransHandler) + +func GetHandler(tt TransType) TransHandler { + if v, exist := transactionHandlerPool[tt]; exist { + return v + } + return nil +} + +func RegisteHandler(tt TransType, th TransHandler) { + if _, exist := transactionHandlerPool[tt]; exist { + panic(fmt.Sprintf("TransHandlerFactory repeate registe handler, type=%v", tt)) + return + } + logger.Logger.Trace("transact.RegisteHandler:", tt) + transactionHandlerPool[tt] = th +} diff --git a/core/transact/transhandler.go b/core/transact/transhandler.go new file mode 
100644 index 0000000..0967cfe --- /dev/null +++ b/core/transact/transhandler.go @@ -0,0 +1,49 @@ +// transhandler +package transact + +type TransHandler interface { + OnExcute(n *TransNode, ud interface{}) TransExeResult + OnCommit(n *TransNode) TransExeResult + OnRollBack(n *TransNode) TransExeResult + OnChildTransRep(n *TransNode, hChild TransNodeID, retCode int, ud interface{}) TransExeResult +} + +type OnExecuteWrapper func(n *TransNode, ud interface{}) TransExeResult +type OnCommitWrapper func(n *TransNode) TransExeResult +type OnRollBackWrapper func(n *TransNode) TransExeResult +type OnChildRespWrapper func(n *TransNode, hChild TransNodeID, retCode int, ud interface{}) TransExeResult + +type TransHanderWrapper struct { + OnExecuteWrapper + OnCommitWrapper + OnRollBackWrapper + OnChildRespWrapper +} + +func (wrapper *TransHanderWrapper) OnExcute(n *TransNode, ud interface{}) TransExeResult { + if wrapper.OnExecuteWrapper != nil { + return wrapper.OnExecuteWrapper(n, ud) + } + return TransExeResult_Success +} + +func (wrapper *TransHanderWrapper) OnCommit(n *TransNode) TransExeResult { + if wrapper.OnCommitWrapper != nil { + return wrapper.OnCommitWrapper(n) + } + return TransExeResult_Success +} + +func (wrapper *TransHanderWrapper) OnRollBack(n *TransNode) TransExeResult { + if wrapper.OnRollBackWrapper != nil { + return wrapper.OnRollBackWrapper(n) + } + return TransExeResult_Success +} + +func (wrapper *TransHanderWrapper) OnChildTransRep(n *TransNode, hChild TransNodeID, retCode int, ud interface{}) TransExeResult { + if wrapper.OnChildRespWrapper != nil { + return wrapper.OnChildRespWrapper(n, hChild, retCode, ud) + } + return TransExeResult_Success +} diff --git a/core/transact/transnode.go b/core/transact/transnode.go new file mode 100644 index 0000000..a565c19 --- /dev/null +++ b/core/transact/transnode.go @@ -0,0 +1,446 @@ +// transnode +package transact + +import ( + "time" + + "mongo.games.com/goserver/core/basic" + 
"mongo.games.com/goserver/core/timer" + "sync" + "sync/atomic" +) + +const ( + ///transact execute result + TransResult_Success int = iota + TransResult_Failed + TransResult_TimeOut + TransResult_Max +) +const ( + ///transact result + TransExeResult_Success TransExeResult = iota + TransExeResult_Failed + TransExeResult_Yield + TransExeResult_NullPointer + TransExeResult_NoStart + TransExeResult_NoSetHandler + TransExeResult_ChildNodeNotExist + TransExeResult_ChildNodeRepeateRet + TransExeResult_AsynFailed + TransExeResult_HadDone + TransExeResult_StartChildFailed + TransExeResult_UnsafeExecuteEnv +) +const ( + ///transact owner type + TransOwnerType_Invalid TransOwnerType = iota + TransOwnerType_Max +) +const ( + ///transact command type + TransCmd_Invalid TransCmd = iota + TransCmd_Commit + TransCmd_RollBack +) +const ( + ///transact + TransRootNodeLevel int = 0 + DefaultTransactTimeout time.Duration = 30 * time.Second +) + +var ( + TransNodeIDNil = TransNodeID(0) + transStats = new(sync.Map) +) + +type TransExeResult int +type TransOwnerType int +type TransCmd int +type TransNodeID int64 +type TransNodeParam struct { + TId TransNodeID + Tt TransType + Ot TransOwnerType + Tct TransactCommitPolicy + Oid int + SkeletonID int + LevelNo int + AreaID int + TimeOut time.Duration + ExpiresTs int64 +} + +type TransResult struct { + RetCode int + RetFiels interface{} +} + +const ( + TransStatsOp_Exe = iota + TransStatsOp_Rollback + TransStatsOp_Commit + TransStatsOp_Yiled + TransStatsOp_Resume + TransStatsOp_Timeout +) + +type TransStats struct { + ExecuteTimes int64 + RollbackTimes int64 + CommitTimes int64 + TimeoutTimes int64 + YieldTimes int64 + ResumeTimes int64 + TotalRuningTime int64 + MaxRuningTime int64 +} + +func (stats *TransStats) incStats(op int) { + switch op { + case TransStatsOp_Exe: + atomic.AddInt64(&stats.ExecuteTimes, 1) + case TransStatsOp_Rollback: + atomic.AddInt64(&stats.RollbackTimes, 1) + case TransStatsOp_Commit: + 
atomic.AddInt64(&stats.CommitTimes, 1) + case TransStatsOp_Yiled: + atomic.AddInt64(&stats.YieldTimes, 1) + case TransStatsOp_Resume: + atomic.AddInt64(&stats.ResumeTimes, 1) + case TransStatsOp_Timeout: + atomic.AddInt64(&stats.TimeoutTimes, 1) + } +} + +type TransCallback func(*TransNode) +type TransBrotherNotify func(*TransNode, TransExeResult) +type TransNode struct { + TransEnv *TransCtx + TransRep *TransResult + MyTnp *TransNodeParam + ParentTnp *TransNodeParam + ownerObj *basic.Object + Childs map[TransNodeID]*TransNodeParam + finChild map[TransNodeID]interface{} + timeHandle timer.TimerHandle + handler TransHandler + AsynCallback TransCallback + brothers map[*TransNode]TransBrotherNotify + createTime time.Time + start bool + yield bool + resume bool + done bool + owner *transactCoordinater + ud interface{} +} + +func (this *TransNode) incStats(op int) { + if s, exist := transStats.Load(this.MyTnp.Tt); exist { + if stats, ok := s.(*TransStats); ok { + stats.incStats(op) + } + } else { + stats := &TransStats{} + transStats.Store(this.MyTnp.Tt, stats) + stats.incStats(op) + } +} + +func (this *TransNode) statsRuningTime() { + if s, exist := transStats.Load(this.MyTnp.Tt); exist { + if stats, ok := s.(*TransStats); ok { + runingTime := int64(time.Now().Sub(this.createTime) / time.Millisecond) + if runingTime > stats.MaxRuningTime { + stats.MaxRuningTime = runingTime + } + stats.TotalRuningTime += runingTime + } + } +} + +func (this *TransNode) execute(ud interface{}) TransExeResult { + if this == nil { + return TransExeResult_NullPointer + } + if this.handler == nil { + return TransExeResult_NoSetHandler + } + this.start = true + ret := this.handler.OnExcute(this, ud) + this.incStats(TransStatsOp_Exe) + if ret == TransExeResult_Yield { + return this.Yield() + } + + return this.doneExecRet(ret) +} + +func (this *TransNode) doneExecRet(ter TransExeResult) TransExeResult { + if this.done { + return TransExeResult_HadDone + } + if ter == TransExeResult_Success { + 
if len(this.Childs) == len(this.finChild) { + if this.MyTnp.LevelNo <= TransRootNodeLevel { + return this.commit() + } else { + if Config.tcs != nil { + this.TransRep.RetCode = TransResult_Success + Config.tcs.SendTransResult(this.ParentTnp, this.MyTnp, this.TransRep) + } + if this.MyTnp.Tct == TransactCommitPolicy_SelfDecide { + return this.commit() + } + } + } + } else { + if this.MyTnp.LevelNo == TransRootNodeLevel { + return this.rollback(TransNodeIDNil) + } else { + if Config.tcs != nil { + this.TransRep.RetCode = TransResult_Failed + Config.tcs.SendTransResult(this.ParentTnp, this.MyTnp, this.TransRep) + } + return this.rollback(TransNodeIDNil) + } + } + return TransExeResult_Success +} + +func (this *TransNode) commit() TransExeResult { + defer this.owner.releaseTrans(this) + + if this == nil { + return TransExeResult_NullPointer + } + if !this.start { + return TransExeResult_NoStart + } + if this.handler == nil { + return TransExeResult_NoSetHandler + } + if this.done { + return TransExeResult_HadDone + } + + defer this.notifyBrother(TransExeResult_Success) + + this.done = true + this.handler.OnCommit(this) + this.incStats(TransStatsOp_Commit) + this.statsRuningTime() + if len(this.Childs) > 0 && Config.tcs != nil { + for _, v := range this.Childs { + if v.Tct == TransactCommitPolicy_TwoPhase { + Config.tcs.SendCmdToTransNode(v, TransCmd_Commit) + } + } + } + + return TransExeResult_Success +} + +func (this *TransNode) rollback(exclude TransNodeID) TransExeResult { + defer this.owner.releaseTrans(this) + + if this == nil { + return TransExeResult_NullPointer + } + if !this.start { + return TransExeResult_NoStart + } + if this.handler == nil { + return TransExeResult_NoSetHandler + } + if this.done { + return TransExeResult_HadDone + } + + defer this.notifyBrother(TransExeResult_Failed) + + this.done = true + this.handler.OnRollBack(this) + this.incStats(TransStatsOp_Rollback) + this.statsRuningTime() + if len(this.Childs) > 0 && Config.tcs != nil { + for k, 
v := range this.Childs { + if k != exclude && v.Tct == TransactCommitPolicy_TwoPhase { + Config.tcs.SendCmdToTransNode(v, TransCmd_RollBack) + } + } + } + + return TransExeResult_Success +} + +func (this *TransNode) timeout() TransExeResult { + if this == nil { + return TransExeResult_NullPointer + } + if !this.start { + return TransExeResult_NoStart + } + if this.handler == nil { + return TransExeResult_NoSetHandler + } + if this.done { + return TransExeResult_HadDone + } + if this.MyTnp.LevelNo > TransRootNodeLevel { + if Config.tcs != nil { + this.TransRep.RetCode = TransResult_TimeOut + Config.tcs.SendTransResult(this.ParentTnp, this.MyTnp, this.TransRep) + } + } + this.incStats(TransStatsOp_Timeout) + this.rollback(TransNodeIDNil) + return TransExeResult_Success +} + +func (this *TransNode) childTransRep(child TransNodeID, retCode int, ud interface{}) TransExeResult { + if this == nil { + return TransExeResult_NullPointer + } + if this.handler == nil { + return TransExeResult_NoSetHandler + } + if !this.start { + return TransExeResult_NoStart + } + if this.done { + return TransExeResult_HadDone + } + if _, exist := this.Childs[child]; !exist { + return TransExeResult_ChildNodeNotExist + } + if this.finChild == nil { + this.finChild = make(map[TransNodeID]interface{}) + } + if _, exist := this.finChild[child]; exist { + return TransExeResult_ChildNodeRepeateRet + } + this.finChild[child] = ud + ret := this.handler.OnChildTransRep(this, child, retCode, ud) + if retCode == TransResult_Success && ret == TransExeResult_Success { + // the child nodes are returned and also run their own end (note: they may be executed asynchronously) + if len(this.Childs) == len(this.finChild) && this.yield == this.resume { + if this.MyTnp.LevelNo == TransRootNodeLevel { + this.commit() + } else { + if Config.tcs != nil { + this.TransRep.RetCode = retCode + Config.tcs.SendTransResult(this.ParentTnp, this.MyTnp, this.TransRep) + } + if this.MyTnp.Tct == TransactCommitPolicy_SelfDecide 
{ + this.commit() + } + } + } + } else { + // They are not the root, then the parent would like to report fails + if this.MyTnp.LevelNo > TransRootNodeLevel { + if Config.tcs != nil { + this.TransRep.RetCode = retCode + Config.tcs.SendTransResult(this.ParentTnp, this.MyTnp, this.TransRep) + } + } + var exclude TransNodeID + if retCode != TransResult_Success { + exclude = child + } + // Sub-transaction fails or times out or the results were not satisfactory, timing optimization, advance RollBack + this.rollback(exclude) + } + + return TransExeResult_Success +} + +func (this *TransNode) StartChildTrans(tnp *TransNodeParam, ud interface{}, timeout time.Duration) TransExeResult { + if this.done { + return TransExeResult_HadDone + } + + tnp.TId = this.owner.spawnTransNodeID() + tnp.TimeOut = timeout + tnp.ExpiresTs = time.Now().Add(timeout).UnixNano() + tnp.LevelNo = this.MyTnp.LevelNo + 1 + + if this.Childs == nil { + this.Childs = make(map[TransNodeID]*TransNodeParam) + } + this.Childs[tnp.TId] = tnp + if Config.tcs != nil { + Config.tcs.SendTransStart(this.MyTnp, tnp, ud) + } + return TransExeResult_Success +} + +func (this *TransNode) GetChildTransParam(childid TransNodeID) *TransNodeParam { + if v, exist := this.Childs[childid]; exist { + return v + } + return nil +} + +func (this *TransNode) Yield() TransExeResult { + this.yield = true + SendTranscatYield(this) + this.incStats(TransStatsOp_Yiled) + return TransExeResult_Success +} + +func (this *TransNode) Resume() TransExeResult { + this.resume = true + SendTranscatResume(this) + this.incStats(TransStatsOp_Resume) + return TransExeResult_Success +} + +func (this *TransNode) Go(obj *basic.Object) TransExeResult { + this.ownerObj = obj + return this.execute(this.ud) +} + +func (this *TransNode) checkExeOver() { + if this.resume == this.yield { + if this.AsynCallback != nil { + this.AsynCallback(this) + } + if this.done == false { + var ter TransExeResult + if this.TransRep.RetCode == TransResult_Success { + ter = 
TransExeResult_Success + } else { + ter = TransExeResult_AsynFailed + } + this.doneExecRet(ter) + } + } +} + +func (this *TransNode) MakeBrotherWith(brother *TransNode, tbn TransBrotherNotify) { + if this.brothers == nil { + this.brothers = make(map[*TransNode]TransBrotherNotify) + } + this.brothers[brother] = tbn +} + +func (this *TransNode) notifyBrother(ter TransExeResult) { + for k, v := range this.brothers { + v(k, ter) + } +} + +func Stats() map[int]TransStats { + stats := make(map[int]TransStats) + transStats.Range(func(k, v interface{}) bool { + if s, ok := v.(*TransStats); ok { + d := *s + stats[int(k.(TransType))] = d + } + return true + }) + return stats +} diff --git a/core/transact/transtimeouthandler.go b/core/transact/transtimeouthandler.go new file mode 100644 index 0000000..f46d7a7 --- /dev/null +++ b/core/transact/transtimeouthandler.go @@ -0,0 +1,16 @@ +package transact + +import ( + "mongo.games.com/goserver/core/timer" +) + +type transactTimerAction struct { +} + +func (t transactTimerAction) OnTimer(h timer.TimerHandle, ud interface{}) bool { + if trans, ok := ud.(*TransNode); ok { + trans.timeout() + return true + } + return false +} diff --git a/core/transact/transtype.go b/core/transact/transtype.go new file mode 100644 index 0000000..8ad42b4 --- /dev/null +++ b/core/transact/transtype.go @@ -0,0 +1,4 @@ +// transtype +package transact + +type TransType int diff --git a/core/utils/atomicidgen.go b/core/utils/atomicidgen.go new file mode 100644 index 0000000..054e09e --- /dev/null +++ b/core/utils/atomicidgen.go @@ -0,0 +1,28 @@ +// AtomicIdGen +package utils + +import ( + "sync/atomic" +) + +type AtomicIdGen struct { + cur uint32 + beg uint32 +} + +func (this *AtomicIdGen) NextId() uint32 { + return atomic.AddUint32(&this.cur, 1) +} + +func (this *AtomicIdGen) Reset() { + atomic.StoreUint32(&this.cur, this.beg) +} + +func (this *AtomicIdGen) SetStartPoint(startPoint uint32) { + this.beg = startPoint + this.Reset() +} + +func (this 
*AtomicIdGen) CurrId() uint32 { + return this.cur +} diff --git a/core/utils/clone.go b/core/utils/clone.go new file mode 100644 index 0000000..a2f57d2 --- /dev/null +++ b/core/utils/clone.go @@ -0,0 +1,134 @@ +package utils + +import ( + "reflect" +) + +//unsupport [Complex64,Complex128,Chan,Func,Interface,UnsafePointer] +func Clone(src interface{}) (dst interface{}) { + if !isStructPtr(reflect.TypeOf(src)) { + return nil + } + + sv := reflect.Indirect(reflect.ValueOf(src)) + if !sv.IsValid() { + return nil + } + st := sv.Type() + dv := reflect.New(st) + if !dv.IsValid() { + return nil + } + deepCopy(sv, dv.Elem(), st) + return dv.Interface() +} + +func deepCopy(src, dst reflect.Value, t reflect.Type) { + switch src.Kind() { + case reflect.String: + dst.SetString(src.String()) + case reflect.Bool: + dst.SetBool(src.Bool()) + case reflect.Float32, reflect.Float64: + dst.SetFloat(src.Float()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + dst.SetInt(src.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + dst.SetUint(src.Uint()) + case reflect.Map: + deepCopyMap(src, dst, t) + case reflect.Array, reflect.Slice: + deepCopySlice(src, dst, t) + case reflect.Struct: + deepCopyStruct(src, dst, t) + } +} + +func deepCopyMap(src, dst reflect.Value, t reflect.Type) { + for _, key := range src.MapKeys() { + var nkey, nval reflect.Value + if key.IsValid() && key.CanSet() { + if key.Kind() == reflect.Ptr { + nkey = reflect.New(key.Elem().Type()) + } else { + nkey = reflect.New(key.Type()) + nkey = reflect.Indirect(nkey) + } + s := reflect.Indirect(key) + d := reflect.Indirect(nkey) + if s.IsValid() && d.IsValid() { + tt := s.Type() + deepCopy(s, d, tt) + } + } else { + nkey = key + } + if val := src.MapIndex(key); val.IsValid() && val.CanSet() { + if val.Kind() == reflect.Ptr { + nval = reflect.New(val.Elem().Type()) + } else { + nval = reflect.New(val.Type()) + nval = reflect.Indirect(nval) + } + s := 
reflect.Indirect(val) + d := reflect.Indirect(nval) + if s.IsValid() && d.IsValid() { + tt := s.Type() + deepCopy(s, d, tt) + } + } else { + nval = val + } + dst.SetMapIndex(nkey, nval) + } +} + +func deepCopySlice(src, dst reflect.Value, t reflect.Type) { + for i := 0; i < src.Len(); i++ { + sf := src.Index(i) + df := dst.Index(i) + + if sf.Kind() == reflect.Ptr { + df = reflect.New(sf.Elem().Type()) + dst.Index(i).Set(df) + } + sf = reflect.Indirect(sf) + df = reflect.Indirect(df) + if sf.IsValid() && df.IsValid() { + tt := sf.Type() + deepCopy(sf, df, tt) + } + } +} + +func deepCopyStruct(src, dst reflect.Value, t reflect.Type) { + for i := 0; i < t.NumField(); i++ { + sv := src.Field(i) + if sv.CanSet() && sv.IsValid() { + switch sv.Kind() { + case reflect.Ptr: + if !sv.IsNil() { + dst.Field(i).Set(reflect.New(sv.Elem().Type())) + } + case reflect.Array, reflect.Slice: + if !sv.IsNil() { + dst.Field(i).Set(reflect.MakeSlice(sv.Type(), sv.Len(), sv.Cap())) + } + case reflect.Map: + if !sv.IsNil() { + dst.Field(i).Set(reflect.MakeMap(sv.Type())) + } + } + sf := reflect.Indirect(sv) + df := reflect.Indirect(dst.Field(i)) + if sf.IsValid() && df.IsValid() { + tt := sf.Type() + deepCopy(sf, df, tt) + } + } + } +} + +func isStructPtr(t reflect.Type) bool { + return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct +} diff --git a/core/utils/clone_test.go b/core/utils/clone_test.go new file mode 100644 index 0000000..6d8015a --- /dev/null +++ b/core/utils/clone_test.go @@ -0,0 +1,69 @@ +package utils + +import ( + "testing" + + "google.golang.org/protobuf/proto" +) + +type StructD struct { + Int4 int +} + +type StructC struct { + Int3 int + DMap map[int]*StructD + DArr []StructD + TMap map[int]struct{} +} + +type StructB struct { + StructC + C *int32 + IntSlice []*int32 + Map map[int]string +} +type StructA struct { + IntValue int + StrValue string + InnerValue *StructB +} + +func TestClone(t *testing.T) { + a := &StructA{IntValue: 1, StrValue: "test", + 
InnerValue: &StructB{ + C: proto.Int(9), + IntSlice: []*int32{proto.Int(1), proto.Int(2), proto.Int(3), proto.Int(4), proto.Int(5), proto.Int(6), proto.Int(7), proto.Int(8), proto.Int(9), proto.Int(0)}, + Map: map[int]string{1: "test", 2: "Hello"}, + StructC: StructC{Int3: 33, DMap: map[int]*StructD{1: &StructD{Int4: 44}}}, + }, + } + b := Clone(a).(*StructA) + //t.Trace(a, b) + + b.InnerValue.IntSlice[0] = proto.Int(99) + b.InnerValue.IntSlice = b.InnerValue.IntSlice[:7] + //t.Tracef("%#v %#v %#v\r\n", a, a.InnerValue.IntSlice, *a.InnerValue.C) + //t.Tracef("%#v %#v %#v\r\n", b, b.InnerValue.IntSlice, *b.InnerValue.C) + //t.Tracef("%#v\r\n", a.InnerValue.Map) + //t.Tracef("%#v\r\n", b.InnerValue.Map) + //t.Tracef("%#v\r\n", a.InnerValue.StructC) + //t.Tracef("%#v\r\n", b.InnerValue.StructC) +} + +func BenchmarkClone(b *testing.B) { + a := &StructA{IntValue: 1, StrValue: "test", + InnerValue: &StructB{ + C: proto.Int(9), + IntSlice: []*int32{proto.Int(1), proto.Int(2), proto.Int(3), proto.Int(4), proto.Int(5), proto.Int(6), proto.Int(7), proto.Int(8), proto.Int(9), proto.Int(0)}, + Map: map[int]string{1: "test", 2: "Hello"}, + StructC: StructC{Int3: 33, DMap: map[int]*StructD{1: &StructD{Int4: 44}}}, + }, + } + + b.StartTimer() + for i := 0; i < b.N; i++ { + _ = Clone(a).(*StructA) + } + b.StopTimer() +} diff --git a/core/utils/debug.go b/core/utils/debug.go new file mode 100644 index 0000000..fdf2b5f --- /dev/null +++ b/core/utils/debug.go @@ -0,0 +1,482 @@ +// most reference from github.com/realint/dbgutil +package utils + +import ( + "bytes" + "fmt" + "log" + "reflect" + "runtime" +) + +var ( + dunno = []byte("???") + centerDot = []byte("·") + dot = []byte(".") + lbr = []byte("{") + lbrn = []byte("{\n") + com = []byte(",") + comn = []byte(",\n") + rbr = []byte("}") + comnrbr = []byte(",\n}") +) + +type pointerInfo struct { + prev *pointerInfo + n int + addr uintptr + pos int + used []int +} + +// +// print the data in console +// +func Display(data 
...interface{}) { + display(true, data...) +} + +// +// return string +// +func GetDisplayString(data ...interface{}) string { + return display(false, data...) +} + +func GetCallStack() string { + var buf [4096]byte + len := runtime.Stack(buf[:], false) + return string(buf[:len]) +} + +func display(displayed bool, data ...interface{}) string { + var pc, file, line, ok = runtime.Caller(2) + + if !ok { + return "" + } + + var buf = new(bytes.Buffer) + + fmt.Fprintf(buf, "[Debug] at %s() [%s:%d]\n", function(pc), file, line) + + fmt.Fprintf(buf, "\n[Variables]\n") + + for i := 0; i < len(data); i += 2 { + var output = fomateinfo(len(data[i].(string))+3, data[i+1]) + fmt.Fprintf(buf, "%s = %s", data[i], output) + } + + if displayed { + log.Print(buf) + } + return buf.String() +} + +// +// return fomateinfo +// +func fomateinfo(headlen int, data ...interface{}) []byte { + var buf = new(bytes.Buffer) + + if len(data) > 1 { + fmt.Fprint(buf, " ") + + fmt.Fprint(buf, "[") + + fmt.Fprintln(buf) + } + + for k, v := range data { + var buf2 = new(bytes.Buffer) + var pointers *pointerInfo + var interfaces []reflect.Value = make([]reflect.Value, 0, 10) + + printKeyValue(buf2, reflect.ValueOf(v), &pointers, &interfaces, nil, true, " ", 1) + + if k < len(data)-1 { + fmt.Fprint(buf2, ", ") + } + + fmt.Fprintln(buf2) + + buf.Write(buf2.Bytes()) + } + + if len(data) > 1 { + fmt.Fprintln(buf) + + fmt.Fprint(buf, " ") + + fmt.Fprint(buf, "]") + } + + return buf.Bytes() +} + +func isSimpleType(val reflect.Value, kind reflect.Kind, pointers **pointerInfo, interfaces *[]reflect.Value) bool { + switch kind { + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.Complex64, reflect.Complex128: + return true + case reflect.String: + return true + case 
reflect.Chan: + return true + case reflect.Invalid: + return true + case reflect.Interface: + for _, in := range *interfaces { + if reflect.DeepEqual(in, val) { + return true + } + } + return false + case reflect.UnsafePointer: + if val.IsNil() { + return true + } + + var elem = val.Elem() + + if isSimpleType(elem, elem.Kind(), pointers, interfaces) { + return true + } + + var addr = val.Elem().UnsafeAddr() + + for p := *pointers; p != nil; p = p.prev { + if addr == p.addr { + return true + } + } + + return false + } + + return false +} + +func printKeyValue(buf *bytes.Buffer, val reflect.Value, pointers **pointerInfo, interfaces *[]reflect.Value, structFilter func(string, string) bool, formatOutput bool, indent string, level int) { + var t = val.Kind() + + switch t { + case reflect.Bool: + fmt.Fprint(buf, val.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fmt.Fprint(buf, val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + fmt.Fprint(buf, val.Uint()) + case reflect.Float32, reflect.Float64: + fmt.Fprint(buf, val.Float()) + case reflect.Complex64, reflect.Complex128: + fmt.Fprint(buf, val.Complex()) + case reflect.UnsafePointer: + fmt.Fprintf(buf, "unsafe.Pointer(0x%X)", val.Pointer()) + case reflect.Ptr: + if val.IsNil() { + fmt.Fprint(buf, "nil") + return + } + + var addr = val.Elem().UnsafeAddr() + + for p := *pointers; p != nil; p = p.prev { + if addr == p.addr { + p.used = append(p.used, buf.Len()) + fmt.Fprintf(buf, "0x%X", addr) + return + } + } + + *pointers = &pointerInfo{ + prev: *pointers, + addr: addr, + pos: buf.Len(), + used: make([]int, 0), + } + + fmt.Fprint(buf, "&") + + printKeyValue(buf, val.Elem(), pointers, interfaces, structFilter, formatOutput, indent, level) + case reflect.String: + fmt.Fprint(buf, "\"", val.String(), "\"") + case reflect.Interface: + var value = val.Elem() + + if !value.IsValid() { + fmt.Fprint(buf, "nil") + } else { + for _, in := range 
*interfaces { + if reflect.DeepEqual(in, val) { + fmt.Fprint(buf, "repeat") + return + } + } + + *interfaces = append(*interfaces, val) + + printKeyValue(buf, value, pointers, interfaces, structFilter, formatOutput, indent, level+1) + } + case reflect.Struct: + var t = val.Type() + + fmt.Fprint(buf, t) + fmt.Fprint(buf, "{") + + for i := 0; i < val.NumField(); i++ { + if formatOutput { + fmt.Fprintln(buf) + } else { + fmt.Fprint(buf, " ") + } + + var name = t.Field(i).Name + + if formatOutput { + for ind := 0; ind < level; ind++ { + fmt.Fprint(buf, indent) + } + } + + fmt.Fprint(buf, name) + fmt.Fprint(buf, ": ") + + if structFilter != nil && structFilter(t.String(), name) { + fmt.Fprint(buf, "ignore") + } else { + printKeyValue(buf, val.Field(i), pointers, interfaces, structFilter, formatOutput, indent, level+1) + } + + fmt.Fprint(buf, ",") + } + + if formatOutput { + fmt.Fprintln(buf) + + for ind := 0; ind < level-1; ind++ { + fmt.Fprint(buf, indent) + } + } else { + fmt.Fprint(buf, " ") + } + + fmt.Fprint(buf, "}") + case reflect.Array, reflect.Slice: + fmt.Fprint(buf, val.Type()) + fmt.Fprint(buf, "{") + + var allSimple = true + + for i := 0; i < val.Len(); i++ { + var elem = val.Index(i) + + var isSimple = isSimpleType(elem, elem.Kind(), pointers, interfaces) + + if !isSimple { + allSimple = false + } + + if formatOutput && !isSimple { + fmt.Fprintln(buf) + } else { + fmt.Fprint(buf, " ") + } + + if formatOutput && !isSimple { + for ind := 0; ind < level; ind++ { + fmt.Fprint(buf, indent) + } + } + + printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1) + + if i != val.Len()-1 || !allSimple { + fmt.Fprint(buf, ",") + } + } + + if formatOutput && !allSimple { + fmt.Fprintln(buf) + + for ind := 0; ind < level-1; ind++ { + fmt.Fprint(buf, indent) + } + } else { + fmt.Fprint(buf, " ") + } + + fmt.Fprint(buf, "}") + case reflect.Map: + var t = val.Type() + var keys = val.MapKeys() + + fmt.Fprint(buf, t) + fmt.Fprint(buf, "{") + 
+ var allSimple = true + + for i := 0; i < len(keys); i++ { + var elem = val.MapIndex(keys[i]) + + var isSimple = isSimpleType(elem, elem.Kind(), pointers, interfaces) + + if !isSimple { + allSimple = false + } + + if formatOutput && !isSimple { + fmt.Fprintln(buf) + } else { + fmt.Fprint(buf, " ") + } + + if formatOutput && !isSimple { + for ind := 0; ind <= level; ind++ { + fmt.Fprint(buf, indent) + } + } + + printKeyValue(buf, keys[i], pointers, interfaces, structFilter, formatOutput, indent, level+1) + fmt.Fprint(buf, ": ") + printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1) + + if i != val.Len()-1 || !allSimple { + fmt.Fprint(buf, ",") + } + } + + if formatOutput && !allSimple { + fmt.Fprintln(buf) + + for ind := 0; ind < level-1; ind++ { + fmt.Fprint(buf, indent) + } + } else { + fmt.Fprint(buf, " ") + } + + fmt.Fprint(buf, "}") + case reflect.Chan: + fmt.Fprint(buf, val.Type()) + case reflect.Invalid: + fmt.Fprint(buf, "invalid") + default: + fmt.Fprint(buf, "unknow") + } +} + +func printPointerInfo(buf *bytes.Buffer, headlen int, pointers *pointerInfo) { + var anyused = false + var pointerNum = 0 + + for p := pointers; p != nil; p = p.prev { + if len(p.used) > 0 { + anyused = true + } + pointerNum += 1 + p.n = pointerNum + } + + if anyused { + var pointerBufs = make([][]rune, pointerNum+1) + + for i := 0; i < len(pointerBufs); i++ { + var pointerBuf = make([]rune, buf.Len()+headlen) + + for j := 0; j < len(pointerBuf); j++ { + pointerBuf[j] = ' ' + } + + pointerBufs[i] = pointerBuf + } + + for pn := 0; pn <= pointerNum; pn++ { + for p := pointers; p != nil; p = p.prev { + if len(p.used) > 0 && p.n >= pn { + if pn == p.n { + pointerBufs[pn][p.pos+headlen] = '└' + + var maxpos = 0 + + for i, pos := range p.used { + if i < len(p.used)-1 { + pointerBufs[pn][pos+headlen] = '┴' + } else { + pointerBufs[pn][pos+headlen] = '┘' + } + + maxpos = pos + } + + for i := 0; i < maxpos-p.pos-1; i++ { + if 
pointerBufs[pn][i+p.pos+headlen+1] == ' ' { + pointerBufs[pn][i+p.pos+headlen+1] = '─' + } + } + } else { + pointerBufs[pn][p.pos+headlen] = '│' + + for _, pos := range p.used { + if pointerBufs[pn][pos+headlen] == ' ' { + pointerBufs[pn][pos+headlen] = '│' + } else { + pointerBufs[pn][pos+headlen] = '┼' + } + } + } + } + } + + buf.WriteString(string(pointerBufs[pn]) + "\n") + } + } +} + +// +// get stack info +// +func stack(skip int, indent string) []byte { + var buf = new(bytes.Buffer) + + for i := skip; ; i++ { + var pc, file, line, ok = runtime.Caller(i) + + if !ok { + break + } + + buf.WriteString(indent) + + fmt.Fprintf(buf, "at %s() [%s:%d]\n", function(pc), file, line) + } + + return buf.Bytes() +} + +// function returns, if possible, the name of the function containing the PC. +func function(pc uintptr) []byte { + fn := runtime.FuncForPC(pc) + if fn == nil { + return dunno + } + name := []byte(fn.Name()) + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. Plus, it has center dots. 
+ // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + if period := bytes.Index(name, dot); period >= 0 { + name = name[period+1:] + } + name = bytes.Replace(name, centerDot, dot, -1) + return name +} diff --git a/core/utils/debug_test.go b/core/utils/debug_test.go new file mode 100644 index 0000000..f56af27 --- /dev/null +++ b/core/utils/debug_test.go @@ -0,0 +1,32 @@ +package utils + +import ( + "testing" +) + +type mytype struct { + next *mytype + prev *mytype +} + +func TestPrint(t *testing.T) { + Display("v1", 1, "v2", 2, "v3", 3) +} + +func TestPrintPoint(t *testing.T) { + var v1 = new(mytype) + var v2 = new(mytype) + + v1.prev = nil + v1.next = v2 + + v2.prev = v1 + v2.next = nil + + Display("v1", v1, "v2", v2) +} + +func TestPrintString(t *testing.T) { + str := GetDisplayString("v1", 1, "v2", 2) + println(str) +} diff --git a/core/utils/healthcheck.go b/core/utils/healthcheck.go new file mode 100644 index 0000000..e0032b7 --- /dev/null +++ b/core/utils/healthcheck.go @@ -0,0 +1,34 @@ +package utils + +//type DatabaseCheck struct { +//} + +//func (dc *DatabaseCheck) Check() error { +// if dc.isConnected() { +// return nil +// } else { +// return errors.New("can't connect database") +// } +//} + +//AddHealthCheck("database",&DatabaseCheck{}) + +var AdminCheckList map[string]HealthChecker + +type HealthChecker interface { + Check() error +} + +type HealthCheckerWrapper func() error + +func (hcw HealthCheckerWrapper) Check() error { + return hcw() +} + +func AddHealthCheck(name string, hc HealthChecker) { + AdminCheckList[name] = hc +} + +func init() { + AdminCheckList = make(map[string]HealthChecker) +} diff --git a/core/utils/idgen.go b/core/utils/idgen.go new file mode 100644 index 0000000..06be137 --- /dev/null +++ b/core/utils/idgen.go @@ -0,0 +1,32 @@ +// idgen +package utils + +import ( + "sync/atomic" +) + +type IdGen struct { + beg int32 + seq int32 +} + +func (this *IdGen) NextId() int { + seq := 
atomic.AddInt32(&this.seq, 1) + return int(seq) +} + +func (this *IdGen) Reset() { + atomic.StoreInt32(&this.seq, this.beg) +} + +func (this *IdGen) SetSeq(seq int) { + atomic.StoreInt32(&this.seq, int32(seq)) +} + +func (this *IdGen) SetStartPoint(startPoint int) { + atomic.StoreInt32(&this.beg, int32(startPoint)) +} + +func (this *IdGen) CurrId() int { + return int(atomic.LoadInt32(&this.seq)) +} diff --git a/core/utils/panic.go b/core/utils/panic.go new file mode 100644 index 0000000..89d14f0 --- /dev/null +++ b/core/utils/panic.go @@ -0,0 +1,86 @@ +package utils + +import ( + "runtime" + + "encoding/json" + "fmt" + "mongo.games.com/goserver/core/logger" + "sync" + "sync/atomic" + "time" +) + +var _panicStackMgr = &PanicStackMgr{ + items: make(map[string]*PanicStackInfo), +} + +type PanicStackMgr struct { + sync.RWMutex + items map[string]*PanicStackInfo +} + +type PanicStackInfo struct { + FirstTime time.Time + LastTime time.Time + Times int64 + ErrorMsg string + StackBuf string +} + +func DumpStackIfPanic(f string) { + if err := recover(); err != nil { + defer func() { //防止二次panic + if err := recover(); err != nil { + logger.Logger.Error(f, " panic.panic,error=", err) + } + }() + logger.Logger.Error(f, " panic,error=", err) + errMsg := fmt.Sprintf("%v", err) + var buf [4096]byte + n := runtime.Stack(buf[:], false) + logger.Logger.Error("stack--->", string(buf[:n])) + stk := make([]uintptr, 32) + m := runtime.Callers(0, stk[:]) + stk = stk[:m] + if len(stk) > 0 { + d, err := json.Marshal(stk) + if err == nil && len(d) > 0 { + key := string(d) + _panicStackMgr.Lock() + defer _panicStackMgr.Unlock() + tNow := time.Now() + if ps, exist := _panicStackMgr.items[key]; exist { + atomic.AddInt64(&ps.Times, 1) + ps.LastTime = tNow + } else { + ps = &PanicStackInfo{ + ErrorMsg: errMsg, + Times: 1, + StackBuf: string(buf[:n]), + FirstTime: tNow, + LastTime: tNow, + } + _panicStackMgr.items[key] = ps + } + } + } + } +} + +func DumpStack(f string) { + logger.Logger.Error(f) 
+ var buf [4096]byte + len := runtime.Stack(buf[:], false) + logger.Logger.Error("stack--->", string(buf[:len])) +} + +func GetPanicStats() map[string]PanicStackInfo { + stats := make(map[string]PanicStackInfo) + _panicStackMgr.RLock() + defer _panicStackMgr.RUnlock() + for k, v := range _panicStackMgr.items { + stats[k] = *v + } + return stats +} diff --git a/core/utils/profile.go b/core/utils/profile.go new file mode 100644 index 0000000..6159ab6 --- /dev/null +++ b/core/utils/profile.go @@ -0,0 +1,123 @@ +package utils + +import ( + "fmt" + "io" + "log" + "os" + "runtime" + "runtime/debug" + "runtime/pprof" + "strconv" + "time" +) + +var startTime = time.Now() +var pid int + +func init() { + pid = os.Getpid() +} + +type RuntimeStats struct { + CountGoroutine int + CountHeap int + CountThread int + CountBlock int +} + +func ProcessInput(input string, w io.Writer) { + switch input { + case "lookup goroutine": + p := pprof.Lookup("goroutine") + p.WriteTo(w, 2) + case "lookup heap": + p := pprof.Lookup("heap") + p.WriteTo(w, 2) + case "lookup threadcreate": + p := pprof.Lookup("threadcreate") + p.WriteTo(w, 2) + case "lookup block": + p := pprof.Lookup("block") + p.WriteTo(w, 2) + case "start cpuprof": + StartCPUProfile() + case "stop cpuprof": + StopCPUProfile() + case "get memprof": + MemProf() + case "gc summary": + PrintGCSummary(w) + } +} + +func MemProf() { + if f, err := os.Create("mem-" + strconv.Itoa(pid) + ".memprof"); err != nil { + log.Fatal("record memory profile failed: %v", err) + } else { + runtime.GC() + defer f.Close() + pprof.WriteHeapProfile(f) + } +} + +func StartCPUProfile() { + f, err := os.Create("cpu-" + strconv.Itoa(pid) + ".pprof") + if err != nil { + log.Fatal(err) + } + pprof.StartCPUProfile(f) +} + +func StopCPUProfile() { + pprof.StopCPUProfile() +} + +func PrintGCSummary(w io.Writer) { + memStats := &runtime.MemStats{} + runtime.ReadMemStats(memStats) + gcstats := &debug.GCStats{PauseQuantiles: make([]time.Duration, 100)} + 
debug.ReadGCStats(gcstats) + + printGC(memStats, gcstats, w) +} + +func printGC(memStats *runtime.MemStats, gcstats *debug.GCStats, w io.Writer) { + + if gcstats.NumGC > 0 { + lastPause := gcstats.Pause[0] + elapsed := time.Now().Sub(startTime) + overhead := float64(gcstats.PauseTotal) / float64(elapsed) * 100 + allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds() + + fmt.Fprintf(w, "NumGC:%d Pause:%s Pause(Avg):%s Overhead:%3.2f%% Alloc:%s Sys:%s Alloc(Rate):%s/s Histogram:%s %s %s \n", + gcstats.NumGC, + ToS(lastPause), + ToS(Avg(gcstats.Pause)), + overhead, + ToH(memStats.Alloc), + ToH(memStats.Sys), + ToH(uint64(allocatedRate)), + ToS(gcstats.PauseQuantiles[94]), + ToS(gcstats.PauseQuantiles[98]), + ToS(gcstats.PauseQuantiles[99])) + } else { + // while GC has disabled + elapsed := time.Now().Sub(startTime) + allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds() + + fmt.Fprintf(w, "Alloc:%s Sys:%s Alloc(Rate):%s/s\n", + ToH(memStats.Alloc), + ToH(memStats.Sys), + ToH(uint64(allocatedRate))) + } +} + +func StatsRuntime() RuntimeStats { + stats := RuntimeStats{} + stats.CountGoroutine = runtime.NumGoroutine() + stats.CountThread, _ = runtime.ThreadCreateProfile(nil) + stats.CountHeap, _ = runtime.MemProfile(nil, true) + stats.CountBlock, _ = runtime.BlockProfile(nil) + return stats +} diff --git a/core/utils/profile_test.go b/core/utils/profile_test.go new file mode 100644 index 0000000..08478fa --- /dev/null +++ b/core/utils/profile_test.go @@ -0,0 +1,14 @@ +package utils + +import ( + "os" + "testing" +) + +func TestProcessInput(t *testing.T) { + ProcessInput("lookup goroutine", os.Stdout) + ProcessInput("lookup heap", os.Stdout) + ProcessInput("lookup threadcreate", os.Stdout) + ProcessInput("lookup block", os.Stdout) + ProcessInput("gc summary", os.Stdout) +} diff --git a/core/utils/rand.go b/core/utils/rand.go new file mode 100644 index 0000000..65b2eda --- /dev/null +++ b/core/utils/rand.go @@ -0,0 +1,69 @@ +package utils + +import 
( + "errors" + "math/rand" + "strconv" + "strings" + "time" +) + +var MinMaxError = errors.New("Min cannot be greater than max.") +var Char_Buff = [26]string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", + "t", "u", "v", "w", "x", "y", "z"} + +func RandChoice(choices []interface{}) (interface{}, error) { + var winner interface{} + length := len(choices) + i, err := IntRange(0, length) + if err != nil { + return nil, err + } + winner = choices[i] + return winner, nil +} + +func IntRange(min, max int) (int, error) { + var result int + switch { + case min > max: + return result, MinMaxError + case min == max: + result = max + case min < max: + rand.Seed(time.Now().UnixNano()) + result = min + rand.Intn(max-min) + } + return result, nil +} + +func RandCode(codelen int) string { + if codelen == 0 { + return "" + } + numLen := rand.Intn(codelen) + charLen := codelen - numLen + var buff string + for i := 0; i < numLen; i++ { + buff = buff + strconv.Itoa(rand.Intn(10)) + } + for i := 0; i < charLen; i++ { + buff = buff + Char_Buff[rand.Intn(26)] + } + var code string + arr := rand.Perm(codelen) + for i := 0; i < 6; i++ { + code = code + string(buff[arr[i]]) + } + return strings.ToUpper(code) +} +func RandNumCode(codelen int) string { + if codelen == 0 { + return "" + } + var buff string + for i := 0; i < codelen; i++ { + buff = buff + strconv.Itoa(rand.Intn(10)) + } + return strings.ToUpper(buff) +} diff --git a/core/utils/slices.go b/core/utils/slices.go new file mode 100644 index 0000000..c11e5ca --- /dev/null +++ b/core/utils/slices.go @@ -0,0 +1,85 @@ +package utils + +import ( + "reflect" + "sort" +) + +func AppendEmptySliceField(slice reflect.Value) reflect.Value { + newField := reflect.Zero(slice.Type().Elem()) + return reflect.Append(slice, newField) +} + +func SetSliceLengh(slice reflect.Value, length int) reflect.Value { + if length > slice.Len() { + for i := slice.Len(); i < length; i++ { + slice = 
AppendEmptySliceField(slice) + } + } else if length < slice.Len() { + slice = slice.Slice(0, length) + } + + return slice +} + +func DeleteEmptySliceElementsVal(sliceVal reflect.Value) reflect.Value { + if sliceVal.Kind() != reflect.Slice { + panic("Argument is not a slice: " + sliceVal.String()) + } + zeroVal := reflect.Zero(sliceVal.Type().Elem()) + for i := 0; i < sliceVal.Len(); i++ { + elemVal := sliceVal.Index(i) + if reflect.DeepEqual(elemVal.Interface(), zeroVal.Interface()) { + before := sliceVal.Slice(0, i) + after := sliceVal.Slice(i+1, sliceVal.Len()) + sliceVal = reflect.AppendSlice(before, after) + i-- + } + } + return sliceVal +} + +func DeleteEmptySliceElements(slice interface{}) interface{} { + return DeleteEmptySliceElementsVal(reflect.ValueOf(slice)).Interface() +} + +func DeleteSliceElementVal(sliceVal reflect.Value, idx int) reflect.Value { + if idx < 0 || idx >= sliceVal.Len() { + return sliceVal + } + before := sliceVal.Slice(0, idx) + after := sliceVal.Slice(idx+1, sliceVal.Len()) + sliceVal = reflect.AppendSlice(before, after) + return sliceVal +} + +func DeleteSliceElement(slice interface{}, idx int) interface{} { + return DeleteSliceElementVal(reflect.ValueOf(slice), idx).Interface() +} + +// Implements sort.Interface +type SortableInterfaceSlice struct { + Slice []interface{} + LessFunc func(a, b interface{}) bool +} + +func (self *SortableInterfaceSlice) Len() int { + return len(self.Slice) +} + +func (self *SortableInterfaceSlice) Less(i, j int) bool { + return self.LessFunc(self.Slice[i], self.Slice[j]) +} + +func (self *SortableInterfaceSlice) Swap(i, j int) { + self.Slice[i], self.Slice[j] = self.Slice[j], self.Slice[i] +} + +func (self *SortableInterfaceSlice) Sort() { + sort.Sort(self) +} + +func SortInterfaceSlice(slice []interface{}, lessFunc func(a, b interface{}) bool) { + sortable := SortableInterfaceSlice{slice, lessFunc} + sortable.Sort() +} diff --git a/core/utils/utils.go b/core/utils/utils.go new file mode 100644 index 
0000000..c618d9f --- /dev/null +++ b/core/utils/utils.go @@ -0,0 +1,93 @@ +package utils + +import ( + "fmt" + "mongo.games.com/goserver/core/logger" + "runtime" + "time" +) + +func Avg(items []time.Duration) time.Duration { + var sum time.Duration + for _, item := range items { + sum += item + } + return time.Duration(int64(sum) / int64(len(items))) +} + +// human readable format +func ToH(bytes uint64) string { + switch { + case bytes < 1024: + return fmt.Sprintf("%dB", bytes) + case bytes < 1024*1024: + return fmt.Sprintf("%.2fK", float64(bytes)/1024) + case bytes < 1024*1024*1024: + return fmt.Sprintf("%.2fM", float64(bytes)/1024/1024) + default: + return fmt.Sprintf("%.2fG", float64(bytes)/1024/1024/1024) + } +} + +// short string format +func ToS(d time.Duration) string { + + u := uint64(d) + if u < uint64(time.Second) { + switch { + case u == 0: + return "0" + case u < uint64(time.Microsecond): + return fmt.Sprintf("%.2fns", float64(u)) + case u < uint64(time.Millisecond): + return fmt.Sprintf("%.2fus", float64(u)/1000) + default: + return fmt.Sprintf("%.2fms", float64(u)/1000/1000) + } + } else { + switch { + case u < uint64(time.Minute): + return fmt.Sprintf("%.2fs", float64(u)/1000/1000/1000) + case u < uint64(time.Hour): + return fmt.Sprintf("%.2fm", float64(u)/1000/1000/1000/60) + default: + return fmt.Sprintf("%.2fh", float64(u)/1000/1000/1000/60/60) + } + } + +} + +func CatchPanic(f func()) (err interface{}) { + defer func() { + err = recover() + if err != nil { + logger.Logger.Warnf("%s panic: %s", f, err) + var buf [4096]byte + n := runtime.Stack(buf[:], false) + logger.Logger.Error("stack--->", string(buf[:n])) + } + }() + f() + return +} + +func RunPanicless(f func()) (panicless bool) { + defer func() { + err := recover() + panicless = err == nil + if err != nil { + logger.Logger.Warnf("%s panic: %s", f, err) + var buf [4096]byte + n := runtime.Stack(buf[:], false) + logger.Logger.Error("stack--->", string(buf[:n])) + } + }() + + f() + return +} + 
+func RepeatUntilPanicless(f func()) { + for !RunPanicless(f) { + } +} diff --git a/core/utils/waitor.go b/core/utils/waitor.go new file mode 100644 index 0000000..4fb5678 --- /dev/null +++ b/core/utils/waitor.go @@ -0,0 +1,48 @@ +package utils + +import ( + "sync/atomic" + + "mongo.games.com/goserver/core/logger" +) + +type Waitor struct { + name string + counter int32 + waiters int32 + c chan string +} + +func NewWaitor(name string) *Waitor { + w := &Waitor{name: name, c: make(chan string, 16)} + return w +} + +func (w *Waitor) Add(name string, delta int) { + v := atomic.AddInt32(&w.counter, int32(delta)) + if v < 0 { + panic("negative Waitor counter") + } + cnt := atomic.LoadInt32(&w.counter) + logger.Logger.Debugf("(w *Waitor)(%v:%p) Add(%v,%v) counter(%v)", w.name, w, name, delta, cnt) +} + +func (w *Waitor) Wait(name string) { + v := atomic.AddInt32(&w.waiters, 1) + if v > 1 { + panic("only support one waitor") + } + cnt := atomic.LoadInt32(&w.waiters) + logger.Logger.Debugf("(w *Waitor)(%v:%p) Waiter(%v) waiters(%v)", w.name, w, name, cnt) + for w.counter > 0 { + dname := <-w.c + v = atomic.AddInt32(&w.counter, -1) + cnt = atomic.LoadInt32(&w.counter) + logger.Logger.Debugf("(w *Waitor)(%v:%p) Waiter(%v) after(%v)done! counter(%v)", w.name, w, name, dname, cnt) + } +} + +func (w *Waitor) Done(name string) { + w.c <- name + logger.Logger.Debugf("(w *Waitor)(%v:%p) Done(%v)!!!", w.name, w, name) +} diff --git a/core/zk/zk.go b/core/zk/zk.go new file mode 100644 index 0000000..188c1e6 --- /dev/null +++ b/core/zk/zk.go @@ -0,0 +1,124 @@ +package zk + +import ( + "errors" + "path" + "strings" + "time" + + "github.com/samuel/go-zookeeper/zk" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/module" +) + +var ( + // error + ErrNoChild = errors.New("zk: children is nil") + ErrNodeNotExist = errors.New("zk: node not exist") +) + +// Connect connect to zookeeper, and start a goroutine log the event. 
+func Connect(addr []string, timeout time.Duration) (*zk.Conn, error) { + conn, session, err := zk.Connect(addr, timeout) + if err != nil { + logger.Logger.Errorf("zk.Connect(\"%v\", %d) error(%v)", addr, timeout, err) + return nil, err + } + go func() { + for { + event := <-session + logger.Logger.Tracef("zookeeper get a event: %s", event.State.String()) + } + }() + return conn, nil +} + +// Create create zookeeper path, if path exists ignore error +func Create(conn *zk.Conn, fpath string) error { + // create zk root path + tpath := "" + for _, str := range strings.Split(fpath, "/")[1:] { + tpath = path.Join(tpath, "/", str) + logger.Logger.Tracef("create zookeeper path: \"%s\"", tpath) + _, err := conn.Create(tpath, []byte(""), 0, zk.WorldACL(zk.PermAll)) + if err != nil { + if err == zk.ErrNodeExists { + logger.Logger.Warnf("zk.create(\"%s\") exists", tpath) + } else { + logger.Logger.Errorf("zk.create(\"%s\") error(%v)", tpath, err) + return err + } + } + } + + return nil +} + +// RegisterTmp create a ephemeral node, and watch it, if node droped then shutdown. 
+func RegisterTemp(conn *zk.Conn, fpath string, data []byte) error { + tpath, err := conn.Create(path.Join(fpath)+"/", data, zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll)) + if err != nil { + logger.Logger.Errorf("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, string(data), err) + return err + } + logger.Logger.Tracef("create a zookeeper node:%s", tpath) + // watch self + go func() { + for { + logger.Logger.Infof("zk path: \"%s\" set a watch", tpath) + exist, _, watch, err := conn.ExistsW(tpath) + if err != nil { + logger.Logger.Errorf("zk.ExistsW(\"%s\") error(%v)", tpath, err) + logger.Logger.Warnf("zk path: \"%s\" set watch failed, shutdown", tpath) + module.Stop() + return + } + if !exist { + logger.Logger.Warnf("zk path: \"%s\" not exist, shutdown", tpath) + module.Stop() + return + } + event := <-watch + logger.Logger.Infof("zk path: \"%s\" receive a event %v", tpath, event) + } + }() + return nil +} + +// GetNodesW get all child from zk path with a watch. +func GetNodesW(conn *zk.Conn, path string) ([]string, <-chan zk.Event, error) { + nodes, stat, watch, err := conn.ChildrenW(path) + if err != nil { + if err == zk.ErrNoNode { + return nil, nil, ErrNodeNotExist + } + logger.Logger.Errorf("zk.ChildrenW(\"%s\") error(%v)", path, err) + return nil, nil, err + } + if stat == nil { + return nil, nil, ErrNodeNotExist + } + if len(nodes) == 0 { + return nil, nil, ErrNoChild + } + return nodes, watch, nil +} + +// GetNodes get all child from zk path. 
+func GetNodes(conn *zk.Conn, path string) ([]string, error) { + nodes, stat, err := conn.Children(path) + if err != nil { + if err == zk.ErrNoNode { + return nil, ErrNodeNotExist + } + logger.Logger.Errorf("zk.Children(\"%s\") error(%v)", path, err) + return nil, err + } + if stat == nil { + return nil, ErrNodeNotExist + } + if len(nodes) == 0 { + return nil, ErrNoChild + } + return nodes, nil +} diff --git a/core/zk/zk_test.go b/core/zk/zk_test.go new file mode 100644 index 0000000..d4466d7 --- /dev/null +++ b/core/zk/zk_test.go @@ -0,0 +1,23 @@ +package zk + +import ( + "testing" + "time" +) + +func TestZK(t *testing.T) { + conn, err := Connect([]string{"10.33.21.152:2181"}, time.Second*30) + if err != nil { + t.Error(err) + } + defer conn.Close() + err = Create(conn, "/test/test") + if err != nil { + t.Error(err) + } + // registertmp + err = RegisterTemp(conn, "/test/test", "1") + if err != nil { + t.Error(err) + } +} diff --git a/doc.go b/doc.go new file mode 100644 index 0000000..ffa18a4 --- /dev/null +++ b/doc.go @@ -0,0 +1 @@ +package goserver diff --git a/examples/echoclient/config.json b/examples/echoclient/config.json new file mode 100644 index 0000000..c01806a --- /dev/null +++ b/examples/echoclient/config.json @@ -0,0 +1,83 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "EchoClient", + "Type": 1, + "Id": 101, + "AreaID": 1, + "Banner": [ + "=================", + "echo client", + "=================" + ] + }, + + "IoServices": [] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "pressure": { + "Count":1, + "Connects": { + "Id": 201, + "Type": 2, + 
"AreaID": 0, + "Name": "EchoService", + "Ip": "gt.doudoubei.com", + "Port": 11111, + "Protocol":"tcp", + "Path":"/", + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 8, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "NoDelay": true, + "IsInnerLink": true, + "IsClient": true, + "IsAutoReconn": true, + "AllowMultiConn": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-trace","session-filter-auth","serversessionfilter"] + } + } +} \ No newline at end of file diff --git a/examples/echoclient/logger.xml b/examples/echoclient/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/examples/echoclient/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/examples/echoclient/main.go b/examples/echoclient/main.go new file mode 100644 index 0000000..81066c0 --- /dev/null +++ b/examples/echoclient/main.go @@ -0,0 +1,15 @@ +// main +package main + +import ( + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/examples/echoclient/pressure.go b/examples/echoclient/pressure.go new file mode 100644 index 0000000..2255ecb --- /dev/null +++ b/examples/echoclient/pressure.go @@ -0,0 +1,63 @@ +package main + +import ( + "time" + + "mongo.games.com/goserver/core" + _ "mongo.games.com/goserver/core/builtin/action" + _ "mongo.games.com/goserver/core/builtin/filter" + "mongo.games.com/goserver/core/module" + "mongo.games.com/goserver/core/netlib" +) + +var ( + Config = Configuration{} + PressureModule = &PressureTest{} + StartCnt = 0 +) + +type Configuration struct { + Count int + Connects netlib.SessionConfig +} + +func (this *Configuration) Name() string { + return "pressure" +} + +func (this *Configuration) Init() error { + 
this.Connects.Init() + return nil +} + +func (this *Configuration) Close() error { + return nil +} + +type PressureTest struct { +} + +func (this PressureTest) ModuleName() string { + return "pressure-module" +} + +func (this *PressureTest) Init() { + cfg := Config.Connects + for i := 0; i < Config.Count; i++ { + cfg.Id += i + netlib.Connect(&cfg) + } +} + +func (this *PressureTest) Update() { + return +} + +func (this *PressureTest) Shutdown() { + module.UnregisteModule(this) +} + +func init() { + core.RegistePackage(&Config) + module.RegisteModule(PressureModule, time.Second*30, 50) +} diff --git a/examples/echoclient/scpacketponghandler.go b/examples/echoclient/scpacketponghandler.go new file mode 100644 index 0000000..701ca37 --- /dev/null +++ b/examples/echoclient/scpacketponghandler.go @@ -0,0 +1,35 @@ +package main + +import ( + "time" + + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/examples/protocol" +) + +type SCPacketPongPacketFactory struct { +} + +type SCPacketPongHandler struct { +} + +func (this *SCPacketPongPacketFactory) CreatePacket() interface{} { + pack := &protocol.SCPacketPong{} + return pack +} + +func (this *SCPacketPongHandler) Process(session *netlib.Session, packetid int, data interface{}) error { + if pong, ok := data.(*protocol.SCPacketPong); ok { + ping := &protocol.CSPacketPing{ + TimeStamb: time.Now().Unix(), + Message: pong.GetMessage(), + } + session.Send(int(protocol.PacketID_PACKET_CS_PING), ping) + } + return nil +} + +func init() { + netlib.RegisterHandler(int(protocol.PacketID_PACKET_SC_PONG), &SCPacketPongHandler{}) + netlib.RegisterFactory(int(protocol.PacketID_PACKET_SC_PONG), &SCPacketPongPacketFactory{}) +} diff --git a/examples/echoclient/serversessionfilter.go b/examples/echoclient/serversessionfilter.go new file mode 100644 index 0000000..b5a917b --- /dev/null +++ b/examples/echoclient/serversessionfilter.go @@ -0,0 +1,45 @@ +// serversessionfilter +package main + +import ( + "time" + + 
"mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/examples/protocol" +) + +var ( + ServerSessionFilterName = "serversessionfilter" +) + +type ServerSessionFilter struct { + netlib.BasicSessionFilter +} + +func (ssf ServerSessionFilter) GetName() string { + return ServerSessionFilterName +} + +func (ssf *ServerSessionFilter) GetInterestOps() uint { + return 1 << netlib.InterestOps_Opened +} + +func (ssf *ServerSessionFilter) OnSessionOpened(s *netlib.Session) bool { + logger.Logger.Trace("(ssf *ServerSessionFilter) OnSessionOpened") + packet := &protocol.CSPacketPing{ + TimeStamb: time.Now().Unix(), + Message: []byte("=1234567890abcderghijklmnopqrstuvwxyz="), + } + //for i := 0; i < 1024*32; i++ { + // packet.Message = append(packet.Message, byte('x')) + //} + s.Send(int(protocol.PacketID_PACKET_CS_PING), packet) + return true +} + +func init() { + netlib.RegisteSessionFilterCreator(ServerSessionFilterName, func() netlib.SessionFilter { + return &ServerSessionFilter{} + }) +} diff --git a/examples/echoserver/config.json b/examples/echoserver/config.json new file mode 100644 index 0000000..944abf6 --- /dev/null +++ b/examples/echoserver/config.json @@ -0,0 +1,81 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "EchoServer", + "Type": 2, + "Id": 201, + "AreaID": 1, + "Banner": [ + "=================", + "echo server", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 201, + "Type": 2, + "AreaId": 1, + "Name": "EchoService", + "Ip": "", + "Port": 2345, + "Protocol":"ws", + "Path":"/", + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-trace","session-filter-auth"] + } + ] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + 
}, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "cmdline": { + "SupportCmdLine": true + } +} \ No newline at end of file diff --git a/examples/echoserver/logger.xml b/examples/echoserver/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/examples/echoserver/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/examples/echoserver/main.go b/examples/echoserver/main.go new file mode 100644 index 0000000..134ad24 --- /dev/null +++ b/examples/echoserver/main.go @@ -0,0 +1,23 @@ +// main +package main + +import ( + "net/http" + _ "net/http/pprof" + + "mongo.games.com/goserver/core" + _ "mongo.games.com/goserver/core/builtin/filter" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + //usage: go tool pprof http://localhost:6060/debug/pprof/heap + go func() { + http.ListenAndServe("localhost:6060", nil) + }() + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/examples/echoserver/scpacketponghandler.go b/examples/echoserver/scpacketponghandler.go new file mode 100644 index 0000000..bde5bde --- /dev/null +++ b/examples/echoserver/scpacketponghandler.go @@ -0,0 +1,33 @@ +package main + +import ( + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/examples/protocol" +) + +type CSPacketPingPacketFactory struct { +} + +type CSPacketPingHandler struct { +} + +func (this *CSPacketPingPacketFactory) CreatePacket() interface{} { + pack := &protocol.CSPacketPing{} + return pack +} + +func (this *CSPacketPingHandler) Process(session *netlib.Session, packetid int, data interface{}) error { + 
if ping, ok := data.(*protocol.CSPacketPing); ok { + pong := &protocol.SCPacketPong{ + TimeStamb: ping.GetTimeStamb(), + Message: ping.GetMessage(), + } + session.Send(int(protocol.PacketID_PACKET_SC_PONG), pong) + } + return nil +} + +func init() { + netlib.RegisterHandler(int(protocol.PacketID_PACKET_CS_PING), &CSPacketPingHandler{}) + netlib.RegisterFactory(int(protocol.PacketID_PACKET_CS_PING), &CSPacketPingPacketFactory{}) +} diff --git a/examples/gen_go.bat b/examples/gen_go.bat new file mode 100644 index 0000000..128e44b --- /dev/null +++ b/examples/gen_go.bat @@ -0,0 +1,13 @@ +@echo off +set work_path=%cd% +set proto_path=%cd%\protocol +set protoc3=%cd%\..\bin\protoc-3.5.1-win32\bin\protoc.exe +set protoc-gen-go-plugin-path="%cd%\..\bin\protoc-gen-go.exe" + +cd %proto_path% + for %%b in (,*.proto) do ( + echo %%b + %protoc3% --plugin=protoc-gen-go=%protoc-gen-go-plugin-path% --go_out=. %%b + ) + cd .. +pause \ No newline at end of file diff --git a/examples/other/config.json b/examples/other/config.json new file mode 100644 index 0000000..c61fbf7 --- /dev/null +++ b/examples/other/config.json @@ -0,0 +1,50 @@ +{ + "netlib": { + "SrvInfo": + { + "Name": "TimerServer", + "Banner": [ + "=================", + "timer server", + "=================" + ] + } + }, + + "core": { + "MaxProcs": 4, + "SupportCmdLine":true, + "SupportSignal": true, + "SupportAdmin": true, + "SlowMS": 200, + "Object": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + }, + + "Executor": { + "Object": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Object": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "Timer": { + "Object": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + } + } +} \ No newline at end of file diff --git a/examples/other/logger.xml b/examples/other/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/examples/other/logger.xml @@ -0,0 
+1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/examples/other/main.go b/examples/other/main.go new file mode 100644 index 0000000..81066c0 --- /dev/null +++ b/examples/other/main.go @@ -0,0 +1,15 @@ +// main +package main + +import ( + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/examples/other/task.go b/examples/other/task.go new file mode 100644 index 0000000..be59f93 --- /dev/null +++ b/examples/other/task.go @@ -0,0 +1,79 @@ +package main + +import ( + "fmt" + "math/rand" + "mongo.games.com/goserver/core/basic" + "time" + + "mongo.games.com/goserver/core/module" + "mongo.games.com/goserver/core/task" +) + +var TaskExampleSington = &TaskExample{} + +type TaskExample struct { + id int +} + +// in task.Worker goroutine +func (this *TaskExample) Call(o *basic.Object) interface{} { + tNow := time.Now() + fmt.Println("[", this.id, "]TaskExample execute start ") + time.Sleep(time.Second * time.Duration(rand.Intn(10))) + fmt.Println("[", this.id, "]TaskExample execute end, take ", time.Now().Sub(tNow)) + return nil +} + +// in laucher goroutine +func (this *TaskExample) Done(i interface{}, t *task.Task) { + fmt.Println("TaskExample execute over") +} + +// ////////////////////////////////////////////////////////////////// +// / Module Implement [beg] +// ////////////////////////////////////////////////////////////////// +func (this *TaskExample) ModuleName() string { + return "taskexample" +} + +func (this *TaskExample) Init() { + for i := 1; i < 100; i++ { + th := &TaskExample{id: i} + t := task.New(nil, th, th, "test") + if b := t.StartByExecutor(fmt.Sprintf("%v", i)); !b { + fmt.Println("[", i, "]task lauch failed") + } else { + fmt.Println("[", i, "]task lauch success") + } + } + + for i := 100; i < 200; i++ { + th := &TaskExample{id: i} + t := 
task.New(nil, th, th, "test") + w := rand.Intn(100) + go func(id, n int) { + if b := t.StartByFixExecutor(fmt.Sprintf("test%v", n)); !b { + fmt.Println("[", id, "]task lauch failed") + } else { + fmt.Println("[", id, "]task lauch success") + } + }(i, w) + } +} + +func (this *TaskExample) Update() { + fmt.Println("TaskExample.Update") +} + +func (this *TaskExample) Shutdown() { + module.UnregisteModule(this) +} + +//////////////////////////////////////////////////////////////////// +/// Module Implement [end] +//////////////////////////////////////////////////////////////////// + +func init() { + module.RegisteModule(TaskExampleSington, time.Second, 0) +} diff --git a/examples/other/timer.go b/examples/other/timer.go new file mode 100644 index 0000000..111cc8e --- /dev/null +++ b/examples/other/timer.go @@ -0,0 +1,52 @@ +package main + +import ( + "fmt" + "time" + + "mongo.games.com/goserver/core/module" + "mongo.games.com/goserver/core/timer" +) + +var TimerExampleSington = &TimerExample{} + +type TimerExample struct { +} + +// ////////////////////////////////////////////////////////////////// +// / Module Implement [beg] +// ////////////////////////////////////////////////////////////////// +func (this *TimerExample) ModuleName() string { + return "timerexample" +} + +func (this *TimerExample) Init() { + var i int + h, b := timer.StartTimer(timer.TimerActionWrapper(func(h timer.TimerHandle, ud interface{}) bool { + i++ + fmt.Println(i, time.Now()) + + if i > 5 { + return false + } + + return true + }), nil, time.Second, 10) + fmt.Println("timer lauch ", h, b) +} + +func (this *TimerExample) Update() { + fmt.Println("timer queue len=", timer.TimerModule.TimerCount()) +} + +func (this *TimerExample) Shutdown() { + module.UnregisteModule(this) +} + +//////////////////////////////////////////////////////////////////// +/// Module Implement [end] +//////////////////////////////////////////////////////////////////// + +func init() { + 
module.RegisteModule(TimerExampleSington, time.Second, 0) +} diff --git a/examples/protocol/packetid.pb.go b/examples/protocol/packetid.pb.go new file mode 100644 index 0000000..7693dc7 --- /dev/null +++ b/examples/protocol/packetid.pb.go @@ -0,0 +1,132 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: packetid.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PacketID int32 + +const ( + PacketID_PACKET_UNKNOW PacketID = 0 + PacketID_PACKET_CS_PING PacketID = 1000 + PacketID_PACKET_SC_PONG PacketID = 1001 +) + +// Enum value maps for PacketID. +var ( + PacketID_name = map[int32]string{ + 0: "PACKET_UNKNOW", + 1000: "PACKET_CS_PING", + 1001: "PACKET_SC_PONG", + } + PacketID_value = map[string]int32{ + "PACKET_UNKNOW": 0, + "PACKET_CS_PING": 1000, + "PACKET_SC_PONG": 1001, + } +) + +func (x PacketID) Enum() *PacketID { + p := new(PacketID) + *p = x + return p +} + +func (x PacketID) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PacketID) Descriptor() protoreflect.EnumDescriptor { + return file_packetid_proto_enumTypes[0].Descriptor() +} + +func (PacketID) Type() protoreflect.EnumType { + return &file_packetid_proto_enumTypes[0] +} + +func (x PacketID) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PacketID.Descriptor instead. 
+func (PacketID) EnumDescriptor() ([]byte, []int) { + return file_packetid_proto_rawDescGZIP(), []int{0} +} + +var File_packetid_proto protoreflect.FileDescriptor + +var file_packetid_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2a, 0x47, 0x0a, 0x08, 0x50, 0x61, + 0x63, 0x6b, 0x65, 0x74, 0x49, 0x44, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x50, 0x41, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x43, 0x53, 0x5f, 0x50, 0x49, 0x4e, 0x47, 0x10, 0xe8, 0x07, 0x12, 0x13, + 0x0a, 0x0e, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x43, 0x5f, 0x50, 0x4f, 0x4e, 0x47, + 0x10, 0xe9, 0x07, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_packetid_proto_rawDescOnce sync.Once + file_packetid_proto_rawDescData = file_packetid_proto_rawDesc +) + +func file_packetid_proto_rawDescGZIP() []byte { + file_packetid_proto_rawDescOnce.Do(func() { + file_packetid_proto_rawDescData = protoimpl.X.CompressGZIP(file_packetid_proto_rawDescData) + }) + return file_packetid_proto_rawDescData +} + +var file_packetid_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_packetid_proto_goTypes = []interface{}{ + (PacketID)(0), // 0: protocol.PacketID +} +var file_packetid_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_packetid_proto_init() } +func file_packetid_proto_init() { + if File_packetid_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_packetid_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_packetid_proto_goTypes, + DependencyIndexes: file_packetid_proto_depIdxs, + EnumInfos: file_packetid_proto_enumTypes, + }.Build() + File_packetid_proto = out.File + file_packetid_proto_rawDesc = nil + file_packetid_proto_goTypes = nil + file_packetid_proto_depIdxs = nil +} diff --git a/examples/protocol/packetid.proto b/examples/protocol/packetid.proto new file mode 100644 index 0000000..95bd9a6 --- /dev/null +++ b/examples/protocol/packetid.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +enum PacketID { + PACKET_UNKNOW = 0; + PACKET_CS_PING = 1000; + PACKET_SC_PONG = 1001; +} \ No newline at end of file diff --git a/examples/protocol/pingpong.pb.go b/examples/protocol/pingpong.pb.go new file mode 100644 index 0000000..a27623c --- /dev/null +++ b/examples/protocol/pingpong.pb.go @@ -0,0 +1,224 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: pingpong.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CSPacketPing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeStamb int64 `protobuf:"varint,1,opt,name=TimeStamb,proto3" json:"TimeStamb,omitempty"` + Message []byte `protobuf:"bytes,2,opt,name=Message,proto3" json:"Message,omitempty"` +} + +func (x *CSPacketPing) Reset() { + *x = CSPacketPing{} + if protoimpl.UnsafeEnabled { + mi := &file_pingpong_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CSPacketPing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CSPacketPing) ProtoMessage() {} + +func (x *CSPacketPing) ProtoReflect() protoreflect.Message { + mi := &file_pingpong_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CSPacketPing.ProtoReflect.Descriptor instead. 
+func (*CSPacketPing) Descriptor() ([]byte, []int) { + return file_pingpong_proto_rawDescGZIP(), []int{0} +} + +func (x *CSPacketPing) GetTimeStamb() int64 { + if x != nil { + return x.TimeStamb + } + return 0 +} + +func (x *CSPacketPing) GetMessage() []byte { + if x != nil { + return x.Message + } + return nil +} + +type SCPacketPong struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeStamb int64 `protobuf:"varint,1,opt,name=TimeStamb,proto3" json:"TimeStamb,omitempty"` + Message []byte `protobuf:"bytes,2,opt,name=Message,proto3" json:"Message,omitempty"` +} + +func (x *SCPacketPong) Reset() { + *x = SCPacketPong{} + if protoimpl.UnsafeEnabled { + mi := &file_pingpong_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SCPacketPong) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SCPacketPong) ProtoMessage() {} + +func (x *SCPacketPong) ProtoReflect() protoreflect.Message { + mi := &file_pingpong_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SCPacketPong.ProtoReflect.Descriptor instead. 
+func (*SCPacketPong) Descriptor() ([]byte, []int) { + return file_pingpong_proto_rawDescGZIP(), []int{1} +} + +func (x *SCPacketPong) GetTimeStamb() int64 { + if x != nil { + return x.TimeStamb + } + return 0 +} + +func (x *SCPacketPong) GetMessage() []byte { + if x != nil { + return x.Message + } + return nil +} + +var File_pingpong_proto protoreflect.FileDescriptor + +var file_pingpong_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x70, 0x69, 0x6e, 0x67, 0x70, 0x6f, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x46, 0x0a, 0x0c, 0x43, 0x53, + 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, + 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x62, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x46, 0x0a, 0x0c, 0x53, 0x43, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x6f, + 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x62, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x62, + 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pingpong_proto_rawDescOnce sync.Once + file_pingpong_proto_rawDescData = file_pingpong_proto_rawDesc +) + +func file_pingpong_proto_rawDescGZIP() []byte { + file_pingpong_proto_rawDescOnce.Do(func() { + file_pingpong_proto_rawDescData = protoimpl.X.CompressGZIP(file_pingpong_proto_rawDescData) + }) + return file_pingpong_proto_rawDescData +} + +var file_pingpong_proto_msgTypes = 
make([]protoimpl.MessageInfo, 2) +var file_pingpong_proto_goTypes = []interface{}{ + (*CSPacketPing)(nil), // 0: protocol.CSPacketPing + (*SCPacketPong)(nil), // 1: protocol.SCPacketPong +} +var file_pingpong_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_pingpong_proto_init() } +func file_pingpong_proto_init() { + if File_pingpong_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pingpong_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CSPacketPing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pingpong_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SCPacketPong); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pingpong_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pingpong_proto_goTypes, + DependencyIndexes: file_pingpong_proto_depIdxs, + MessageInfos: file_pingpong_proto_msgTypes, + }.Build() + File_pingpong_proto = out.File + file_pingpong_proto_rawDesc = nil + file_pingpong_proto_goTypes = nil + file_pingpong_proto_depIdxs = nil +} diff --git a/examples/protocol/pingpong.proto b/examples/protocol/pingpong.proto new file mode 100644 index 0000000..5105f74 --- /dev/null +++ b/examples/protocol/pingpong.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message 
CSPacketPing { + int64 TimeStamb = 1; + bytes Message = 2; +} + +message SCPacketPong { + int64 TimeStamb = 1; + bytes Message = 2; +} \ No newline at end of file diff --git a/examples/protocol/txtype.go b/examples/protocol/txtype.go new file mode 100644 index 0000000..1dd629d --- /dev/null +++ b/examples/protocol/txtype.go @@ -0,0 +1,9 @@ +package protocol + +import ( + "mongo.games.com/goserver/core/transact" +) + +const ( + TxTrace transact.TransType = 1000 +) diff --git a/examples/protocol/txuserdata.go b/examples/protocol/txuserdata.go new file mode 100644 index 0000000..84c91a2 --- /dev/null +++ b/examples/protocol/txuserdata.go @@ -0,0 +1,7 @@ +package protocol + +type StructA struct { + X, Y, Z int + P *int + Desc string +} diff --git a/examples/txserver1/config.json b/examples/txserver1/config.json new file mode 100644 index 0000000..6f5b155 --- /dev/null +++ b/examples/txserver1/config.json @@ -0,0 +1,73 @@ +{ + "netlib": { + "SrvInfo": + { + "Name": "TxServer1", + "Type": 2, + "Id": 201, + "AreaID": 1, + "Banner": [ + "=================", + "tx server 1", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 201, + "Type": 2, + "AreaId": 1, + "Name": "TxService1", + "Ip": "127.0.0.1", + "Port": 2346, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-trace","session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe"] + }, + { + "Id": 202, + "Type": 2, + "AreaId": 1, + "Name": "TxService2", + "Ip": "127.0.0.1", + "Port": 2347, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "IsClient": true, + "IsAutoReconn": true, + "NoDelay": true, + "SupportFragment": 
true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-trace","session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe"] + } + ] + }, + + "tx": { + "TxSkeletonName": "mongo.games.com/goserver/srvlib/txcommskeleton" + }, + + "core": { + "MaxProcs": 4 + } +} \ No newline at end of file diff --git a/examples/txserver1/dependent.go b/examples/txserver1/dependent.go new file mode 100644 index 0000000..945b854 --- /dev/null +++ b/examples/txserver1/dependent.go @@ -0,0 +1,9 @@ +// dummy +package main + +import ( + _ "mongo.games.com/goserver/core/builtin/action" + _ "mongo.games.com/goserver/core/builtin/filter" + _ "mongo.games.com/goserver/srvlib" + _ "mongo.games.com/goserver/srvlib/handler" +) diff --git a/examples/txserver1/logger.xml b/examples/txserver1/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/examples/txserver1/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/examples/txserver1/main.go b/examples/txserver1/main.go new file mode 100644 index 0000000..81066c0 --- /dev/null +++ b/examples/txserver1/main.go @@ -0,0 +1,15 @@ +// main +package main + +import ( + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/examples/txserver1/tx_trace.go b/examples/txserver1/tx_trace.go new file mode 100644 index 0000000..0b465c3 --- /dev/null +++ b/examples/txserver1/tx_trace.go @@ -0,0 +1,53 @@ +package main + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/core/transact" + "mongo.games.com/goserver/examples/protocol" + "mongo.games.com/goserver/srvlib" +) + +type traceTransHandler struct { +} + +func init() { + transact.RegisteHandler(protocol.TxTrace, &traceTransHandler{}) + 
srvlib.ServerSessionMgrSington.AddListener(&MyServerSessionRegisteListener{}) +} + +func (this *traceTransHandler) OnExcute(tNode *transact.TransNode, ud interface{}) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnExcute ") + userData := &protocol.StructA{} + err := netlib.UnmarshalPacketNoPackId(ud.([]byte), userData) + if err == nil { + logger.Logger.Tracef("==========%#v", userData) + } + return transact.TransExeResult_Success +} + +func (this *traceTransHandler) OnCommit(tNode *transact.TransNode) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnCommit ") + return transact.TransExeResult_Success +} + +func (this *traceTransHandler) OnRollBack(tNode *transact.TransNode) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnRollBack ") + return transact.TransExeResult_Success +} + +func (this *traceTransHandler) OnChildTransRep(tNode *transact.TransNode, hChild transact.TransNodeID, retCode int, ud interface{}) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnChildTransRep ") + return transact.TransExeResult_Success +} + +type MyServerSessionRegisteListener struct { +} + +func (mssrl *MyServerSessionRegisteListener) OnRegiste(*netlib.Session) { + logger.Logger.Trace("MyServerSessionRegisteListener.OnRegiste") +} + +func (mssrl *MyServerSessionRegisteListener) OnUnregiste(*netlib.Session) { + logger.Logger.Trace("MyServerSessionRegisteListener.OnUnregiste") +} diff --git a/examples/txserver2/config.json b/examples/txserver2/config.json new file mode 100644 index 0000000..348acfb --- /dev/null +++ b/examples/txserver2/config.json @@ -0,0 +1,49 @@ +{ + "netlib": { + "SrvInfo": + { + "Name": "TxServer2", + "Type": 2, + "Id": 202, + "AreaID": 1, + "Banner": [ + "=================", + "tx server 2", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 202, + "Type": 2, + "AreaId": 1, + "Name": "TxService2", + "Ip": "127.0.0.1", + "Port": 2347, + "MaxDone": 20, + "MaxPend": 20, + 
"MaxPacket": 65535, + "MaxConn": 10, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-trace","session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe"] + } + ] + }, + + "tx": { + "TxSkeletonName": "mongo.games.com/goserver/srvlib/txcommskeleton" + }, + + "core": { + "MaxProcs": 4 + } +} \ No newline at end of file diff --git a/examples/txserver2/dependent.go b/examples/txserver2/dependent.go new file mode 100644 index 0000000..945b854 --- /dev/null +++ b/examples/txserver2/dependent.go @@ -0,0 +1,9 @@ +// dummy +package main + +import ( + _ "mongo.games.com/goserver/core/builtin/action" + _ "mongo.games.com/goserver/core/builtin/filter" + _ "mongo.games.com/goserver/srvlib" + _ "mongo.games.com/goserver/srvlib/handler" +) diff --git a/examples/txserver2/logger.xml b/examples/txserver2/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/examples/txserver2/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/examples/txserver2/main.go b/examples/txserver2/main.go new file mode 100644 index 0000000..81066c0 --- /dev/null +++ b/examples/txserver2/main.go @@ -0,0 +1,15 @@ +// main +package main + +import ( + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/examples/txserver2/tx_trace.go b/examples/txserver2/tx_trace.go new file mode 100644 index 0000000..6dde2d3 --- /dev/null +++ b/examples/txserver2/tx_trace.go @@ -0,0 +1,71 @@ +package main + +import ( + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/core/transact" + 
"mongo.games.com/goserver/examples/protocol" + "mongo.games.com/goserver/srvlib" +) + +type traceTransHandler struct { +} + +func init() { + transact.RegisteHandler(protocol.TxTrace, &traceTransHandler{}) + srvlib.ServerSessionMgrSington.AddListener(&MyServerSessionRegisteListener{}) +} + +func (this *traceTransHandler) OnExcute(tNode *transact.TransNode, ud interface{}) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnExcute ") + tnp := &transact.TransNodeParam{ + Tt: protocol.TxTrace, + Ot: transact.TransOwnerType(2), + Oid: 201, + AreaID: 1, + Tct: transact.TransactCommitPolicy_TwoPhase, + } + p := new(int) + *p = -2 + userData := protocol.StructA{X: 10, Y: -1, Z: 65535, P: p, Desc: "welcome!"} + tNode.StartChildTrans(tnp, userData, transact.DefaultTransactTimeout) + return transact.TransExeResult_Success +} + +func (this *traceTransHandler) OnCommit(tNode *transact.TransNode) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnCommit ") + return transact.TransExeResult_Success +} + +func (this *traceTransHandler) OnRollBack(tNode *transact.TransNode) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnRollBack ") + return transact.TransExeResult_Success +} + +func (this *traceTransHandler) OnChildTransRep(tNode *transact.TransNode, hChild transact.TransNodeID, retCode int, ud interface{}) transact.TransExeResult { + logger.Logger.Trace("traceTransHandler.OnChildTransRep ") + return transact.TransExeResult_Success +} + +type MyServerSessionRegisteListener struct { +} + +func (mssrl *MyServerSessionRegisteListener) OnRegiste(*netlib.Session) { + logger.Logger.Trace("MyServerSessionRegisteListener.OnRegiste") + tnp := &transact.TransNodeParam{ + Tt: protocol.TxTrace, + Ot: transact.TransOwnerType(2), + Oid: 202, + AreaID: 1, + } + + tNode := transact.DTCModule.StartTrans(tnp, nil, transact.DefaultTransactTimeout) + if tNode != nil { + tNode.Go(core.CoreObject()) + } +} + +func (mssrl 
*MyServerSessionRegisteListener) OnUnregiste(*netlib.Session) { + logger.Logger.Trace("MyServerSessionRegisteListener.OnUnregiste") +} diff --git a/mmo/accountsrv/config.json b/mmo/accountsrv/config.json new file mode 100644 index 0000000..bcdee7c --- /dev/null +++ b/mmo/accountsrv/config.json @@ -0,0 +1,103 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "AccountServer", + "Type": 3, + "Id": 301, + "AreaID": 1, + "Banner": [ + "=================", + "account server", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 301, + "Type": 3, + "AreaId": 1, + "Name": "AccountService", + "Ip": "127.0.0.1", + "Port": 3001, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe"] + }, + { + "Id": 501, + "Type": 5, + "AreaId": 1, + "Name": "ManagerService", + "Ip": "127.0.0.1", + "Port": 5555, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "IsClient": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe","srv-service-handler"] + } + ] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "cmdline": { + 
"SupportCmdLine": true + } +} \ No newline at end of file diff --git a/mmo/accountsrv/doc.go b/mmo/accountsrv/doc.go new file mode 100644 index 0000000..76f9af5 --- /dev/null +++ b/mmo/accountsrv/doc.go @@ -0,0 +1,5 @@ +package main + +// Account server. +// Responsibilities: +// 1:Responsible for account verification, login, logout. diff --git a/mmo/accountsrv/logger.xml b/mmo/accountsrv/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/mmo/accountsrv/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mmo/accountsrv/main.go b/mmo/accountsrv/main.go new file mode 100644 index 0000000..493cc9f --- /dev/null +++ b/mmo/accountsrv/main.go @@ -0,0 +1,16 @@ +package main + +import ( + _ "mongo.games.com/goserver/mmo" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/mmo/balancesrv/clientsessionhandler.go b/mmo/balancesrv/clientsessionhandler.go new file mode 100644 index 0000000..0d30b5a --- /dev/null +++ b/mmo/balancesrv/clientsessionhandler.go @@ -0,0 +1,101 @@ +package main + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/mmo/protocol" + "mongo.games.com/goserver/srvlib" +) + +var ( + SessionHandlerClientBalanceName = "handler-client-balance" + SessionHandlerClientBalanceMgr = &SessionHandlerClientBalance{gates: make(map[int32]*gateService)} +) + +type gateService struct { + load int + active bool +} + +type SessionHandlerClientBalance struct { + netlib.BasicSessionHandler + gates map[int32]*gateService +} + +func (sfcb SessionHandlerClientBalance) GetName() string { + return SessionHandlerClientBalanceName +} + +func (sfcb *SessionHandlerClientBalance) GetInterestOps() uint { + return 1 << netlib.InterestOps_Opened +} + +func (sfcb 
*SessionHandlerClientBalance) OnSessionOpened(s *netlib.Session) { + logger.Logger.Trace("SessionHandlerClientBalance.OnSessionOpened") + services := srvlib.ServiceMgr.GetServices(srvlib.ClientServiceType) + if services != nil { + /*清理掉线的gate*/ + for k, _ := range sfcb.gates { + if _, has := services[k]; !has { + logger.Logger.Trace("gate leave: ", k) + delete(sfcb.gates, k) + } + } + /*补充新上线的gate*/ + for k, v := range services { + if _, has := sfcb.gates[k]; !has { + sfcb.gates[k] = &gateService{active: true} + logger.Logger.Trace("new gate come in: ", k, v) + } + } + } + + /*查找最小负载的gate*/ + var minsrvid int32 + var min = 100000 + for k, v := range sfcb.gates { + if v.active && v.load < min { + minsrvid = k + min = v.load + } + } + + pack := &protocol.SCGateInfo{} + if mls, has := services[minsrvid]; has { + pack.SrvType = mls.GetSrvType() + pack.SrvId = mls.GetSrvId() + pack.AuthKey = mls.GetAuthKey() + pack.Ip = mls.GetOuterIp() + pack.Port = mls.GetPort() + } + s.Send(int(protocol.MmoPacketID_PACKET_SC_GATEINFO), pack) + logger.Logger.Trace(pack) + s.Close() +} + +func init() { + netlib.RegisteSessionHandlerCreator(SessionHandlerClientBalanceName, func() netlib.SessionHandler { + return SessionHandlerClientBalanceMgr + }) + + netlib.RegisterFactory(int(protocol.MmoPacketID_PACKET_GB_CUR_LOAD), netlib.PacketFactoryWrapper(func() interface{} { + return &protocol.ServerLoad{} + })) + + netlib.RegisterHandler(int(protocol.MmoPacketID_PACKET_GB_CUR_LOAD), netlib.HandlerWrapper(func(s *netlib.Session, packetid int, pack interface{}) error { + if sr, ok := pack.(*protocol.ServerLoad); ok { + srvid := sr.GetSrvId() + if v, has := SessionHandlerClientBalanceMgr.gates[srvid]; has { + v.load = int(sr.GetCurLoad()) + logger.Logger.Trace("receive gate load info 1, sid=", srvid, " load=", v.load) + } else { + services := srvlib.ServiceMgr.GetServices(srvlib.ClientServiceType) + if _, has := services[srvid]; has { + SessionHandlerClientBalanceMgr.gates[srvid] = 
&gateService{active: true, load: int(sr.GetCurLoad())} + logger.Logger.Trace("receive gate load info 2, sid=", srvid, " load=", sr.GetCurLoad()) + } + } + } + return nil + })) +} diff --git a/mmo/balancesrv/config.json b/mmo/balancesrv/config.json new file mode 100644 index 0000000..9ce61bb --- /dev/null +++ b/mmo/balancesrv/config.json @@ -0,0 +1,126 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "BalanceServer", + "Type": 2, + "Id": 201, + "AreaID": 1, + "Banner": [ + "=================", + "balance server", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 101, + "Type": 1, + "AreaId": 1, + "Name": "ClientService", + "Ip": "", + "Port": 11001, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 1024, + "MaxConn": 10000, + "RcvBuff": 1024, + "SndBuff": 1024, + "WriteTimeout": 10, + "ReadTimeout": 10, + "SoLinger": 10, + "IsInnerLink": false, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-trace","session-filter-auth"], + "HandlerChain": ["handler-client-balance"] + }, + { + "Id": 201, + "Type": 2, + "AreaId": 1, + "Name": "BalanceService", + "Ip": "127.0.0.1", + "Port": 2001, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe","srv-service-handler"] + }, + { + "Id": 501, + "Type": 5, + "AreaId": 1, + "Name": "ManagerService", + "Ip": "127.0.0.1", + "Port": 5555, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "IsClient": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": 
["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe","srv-service-handler"] + } + ] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "cmdline": { + "SupportCmdLine": true + } +} \ No newline at end of file diff --git a/mmo/balancesrv/doc.go b/mmo/balancesrv/doc.go new file mode 100644 index 0000000..2fd0ff9 --- /dev/null +++ b/mmo/balancesrv/doc.go @@ -0,0 +1,5 @@ +package main + +// Load balancing server in traditional sense. +// Responsibilities: +// 1:according to the load on each gatesrv, select the current load a minimum. diff --git a/mmo/balancesrv/logger.xml b/mmo/balancesrv/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/mmo/balancesrv/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mmo/balancesrv/main.go b/mmo/balancesrv/main.go new file mode 100644 index 0000000..493cc9f --- /dev/null +++ b/mmo/balancesrv/main.go @@ -0,0 +1,16 @@ +package main + +import ( + _ "mongo.games.com/goserver/mmo" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/mmo/build.bat b/mmo/build.bat new file mode 100644 index 0000000..9064974 --- /dev/null +++ b/mmo/build.bat @@ -0,0 +1,13 @@ +cd accountsrv +go build +cd ../balancesrv +go build +cd ../gamesrv +go build +cd ../gatesrv +go build +cd ../mgrsrv +go build +cd ../worldsrv +go build +pause \ No newline at end of file 
diff --git a/mmo/clean.bat b/mmo/clean.bat new file mode 100644 index 0000000..8f23704 --- /dev/null +++ b/mmo/clean.bat @@ -0,0 +1,6 @@ +del /F/S accountsrv\accountsrv.exe +del /F/S balancesrv\balancesrv.exe +del /F/S gamesrv\gamesrv.exe +del /F/S gatesrv\gatesrv.exe +del /F/S mgrsrv\mgrsrv.exe +del /F/S worldsrv\worldsrv.exe \ No newline at end of file diff --git a/mmo/client/config.json b/mmo/client/config.json new file mode 100644 index 0000000..dd408fd --- /dev/null +++ b/mmo/client/config.json @@ -0,0 +1,82 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "Client", + "Type": 1, + "Id": 1, + "AreaID": 1, + "Banner": [ + "=================", + "client", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 101, + "Type": 1, + "AreaId": 1, + "Name": "ClientService", + "Ip": "127.0.0.1", + "Port": 11001, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 1024, + "MaxConn": 1, + "RcvBuff": 1024, + "SndBuff": 1024, + "WriteTimeout": 30, + "ReadTimeout": 30, + "SoLinger": 10, + "IsInnerLink": false, + "IsClient": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-trace","session-filter-auth"], + "HandlerChain": [] + } + ] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "cmdline": { + "SupportCmdLine": true + } +} \ No newline at end of file diff --git a/mmo/client/logger.xml b/mmo/client/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/mmo/client/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/mmo/client/main.go b/mmo/client/main.go new file mode 100644 index 0000000..493cc9f --- /dev/null +++ b/mmo/client/main.go @@ -0,0 +1,16 @@ +package main + +import ( + _ "mongo.games.com/goserver/mmo" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/mmo/client/packet_scgateinfo.go b/mmo/client/packet_scgateinfo.go new file mode 100644 index 0000000..3d1d805 --- /dev/null +++ b/mmo/client/packet_scgateinfo.go @@ -0,0 +1,42 @@ +package main + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/mmo/protocol" +) + +func init() { + netlib.RegisterFactory(int(protocol.MmoPacketID_PACKET_SC_GATEINFO), netlib.PacketFactoryWrapper(func() interface{} { + return &protocol.SCGateInfo{} + })) + netlib.RegisterHandler(int(protocol.MmoPacketID_PACKET_SC_GATEINFO), netlib.HandlerWrapper(func(s *netlib.Session, packetid int, pack interface{}) error { + logger.Logger.Trace("receive gateinfo==", pack) + if sr, ok := pack.(*protocol.SCGateInfo); ok { + sc := &netlib.SessionConfig{ + Id: int(sr.GetSrvId()), + Type: int(sr.GetSrvType()), + Ip: sr.GetIp(), + Port: int(sr.GetPort()), + AuthKey: sr.GetAuthKey(), + WriteTimeout: 30, + ReadTimeout: 30, + IdleTimeout: 30, + MaxDone: 20, + MaxPend: 20, + MaxPacket: 1024, + RcvBuff: 1024, + SndBuff: 1024, + IsClient: true, + NoDelay: true, + FilterChain: []string{"session-filter-trace", "session-filter-auth"}, + } + sc.Init() + err := netlib.Connect(sc) + if err != nil { + logger.Logger.Warn("connect server failed err:", err) + } + } + return nil + })) +} diff --git a/mmo/close.bat b/mmo/close.bat new file mode 100644 index 0000000..06a6e7e --- /dev/null +++ b/mmo/close.bat @@ -0,0 +1,6 @@ +TASKKILL /F /IM accountsrv.exe +TASKKILL /F /IM balancesrv.exe +TASKKILL /F /IM gamesrv.exe +TASKKILL /F /IM 
gatesrv.exe +TASKKILL /F /IM mgrsrv.exe +TASKKILL /F /IM worldsrv.exe \ No newline at end of file diff --git a/mmo/clrlogs.bat b/mmo/clrlogs.bat new file mode 100644 index 0000000..0f3f20b --- /dev/null +++ b/mmo/clrlogs.bat @@ -0,0 +1,6 @@ +del /F/S accountsrv\all.log +del /F/S balancesrv\all.log +del /F/S gamesrv\all.log +del /F/S gatesrv\all.log +del /F/S mgrsrv\all.log +del /F/S worldsrv\all.log \ No newline at end of file diff --git a/mmo/gamesrv/config.json b/mmo/gamesrv/config.json new file mode 100644 index 0000000..a6ff07b --- /dev/null +++ b/mmo/gamesrv/config.json @@ -0,0 +1,103 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "GameServer", + "Type": 7, + "Id": 701, + "AreaID": 1, + "Banner": [ + "=================", + "game server", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 701, + "Type": 7, + "AreaId": 1, + "Name": "GameService", + "Ip": "127.0.0.1", + "Port": 7001, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe"] + }, + { + "Id": 501, + "Type": 5, + "AreaId": 1, + "Name": "ManagerService", + "Ip": "127.0.0.1", + "Port": 5555, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "IsClient": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe","srv-service-handler"] + } + ] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 
+ }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "cmdline": { + "SupportCmdLine": true + } +} \ No newline at end of file diff --git a/mmo/gamesrv/doc.go b/mmo/gamesrv/doc.go new file mode 100644 index 0000000..6a03a6f --- /dev/null +++ b/mmo/gamesrv/doc.go @@ -0,0 +1,5 @@ +package main + +// Game logic server. +// Responsibilities: +// 1:The logic is mainly responsible for the game. diff --git a/mmo/gamesrv/logger.xml b/mmo/gamesrv/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/mmo/gamesrv/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mmo/gamesrv/main.go b/mmo/gamesrv/main.go new file mode 100644 index 0000000..493cc9f --- /dev/null +++ b/mmo/gamesrv/main.go @@ -0,0 +1,16 @@ +package main + +import ( + _ "mongo.games.com/goserver/mmo" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/mmo/gatesrv/broadcasthandler.go b/mmo/gatesrv/broadcasthandler.go new file mode 100644 index 0000000..bf333e7 --- /dev/null +++ b/mmo/gatesrv/broadcasthandler.go @@ -0,0 +1,61 @@ +package main + +import ( + "google.golang.org/protobuf/proto" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +var ( + BroadcastMaker = &BroadcastPacketFactory{} +) + +type BroadcastPacketFactory struct { +} + +type BroadcastHandler struct { +} + +func init() { + netlib.RegisterHandler(int(protocol.SrvlibPacketID_PACKET_SS_BROADCAST), &BroadcastHandler{}) + 
netlib.RegisterFactory(int(protocol.SrvlibPacketID_PACKET_SS_BROADCAST), BroadcastMaker) +} + +func (this *BroadcastPacketFactory) CreatePacket() interface{} { + pack := &protocol.SSPacketBroadcast{} + return pack +} + +func (this *BroadcastPacketFactory) CreateBroadcastPacket(sp *protocol.BCSessionUnion, packetid int, data interface{}) (proto.Message, error) { + pack := &protocol.SSPacketBroadcast{ + SessParam: sp, + PacketId: int32(packetid), + } + if byteData, ok := data.([]byte); ok { + pack.Data = byteData + } else { + byteData, err := netlib.MarshalPacket(packetid, data) + if err == nil { + pack.Data = byteData + } else { + logger.Logger.Warn("BroadcastPacketFactory.CreateBroadcastPacket err:", err) + return nil, err + } + } + return pack, nil +} + +func (this *BroadcastHandler) Process(s *netlib.Session, packetid int, data interface{}) error { + if bp, ok := data.(*protocol.SSPacketBroadcast); ok { + pd := bp.GetData() + sp := bp.GetSessParam() + if bcss := sp.GetBcss(); bcss != nil { + srvlib.ServerSessionMgrSington.Broadcast(int(bp.GetPacketId()), pd, int(bcss.GetSArea()), int(bcss.GetSType())) + } else { + BundleMgrSington.Broadcast(int(bp.GetPacketId()), pd) + } + } + return nil +} diff --git a/mmo/gatesrv/bundlemgr.go b/mmo/gatesrv/bundlemgr.go new file mode 100644 index 0000000..1e4dfd3 --- /dev/null +++ b/mmo/gatesrv/bundlemgr.go @@ -0,0 +1,264 @@ +package main + +import ( + "fmt" + "math" + "sync/atomic" + "time" + + "mongo.games.com/goserver/core/module" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib" +) + +var ( + SessionHandlerBundleName = "handler-client-bundle" + BundleMgrSington = &BundleMgr{} +) + +type SendPendItem struct { + sid int //当前pending包发给的session + packetid int + logicNo uint32 + data interface{} + ts int64 +} + +type BundleSession struct { + bundleKey string + bundleSession []*netlib.Session + currentSession *netlib.Session + fastestSession *netlib.Session + worldsrvSession *netlib.Session + 
gamesrvSession *netlib.Session + waitAckChain []*SendPendItem + rcvLogicNo uint32 + sndLogicNo uint32 + lastSndTs int64 + lastAckTs int64 + RTT int64 //Round Trip Time + RTO int64 //Retransmission TimeOut +} + +func (bs *BundleSession) OnSessionClose(s *netlib.Session) bool { + if bs.fastestSession == s { + bs.fastestSession = nil + } + if bs.currentSession == s { + bs.currentSession = nil + } + cnt := len(bs.bundleSession) + for i := 0; i < cnt; i++ { + if bs.bundleSession[i] == s { + if i == 0 { + bs.bundleSession = bs.bundleSession[1:] + } else if i == cnt-1 { + bs.bundleSession = bs.bundleSession[:cnt-1] + } else { + temp := bs.bundleSession[:i] + temp = append(temp, bs.bundleSession[i+1:]...) + bs.bundleSession = temp + } + break + } + } + //优先最快的一个连接 + if bs.currentSession == nil { + bs.currentSession = bs.fastestSession + } + //从池里面挑选一个 + if bs.currentSession == nil { + if len(bs.bundleSession) > 0 { + bs.currentSession = bs.bundleSession[0] + } + } + + //重发没有回应的包 + if bs.currentSession != nil { + idx := -1 + for i, pack := range bs.waitAckChain { + if pack.sid == s.Id { + bs.currentSession.SendEx(pack.packetid, pack.logicNo, pack.data, false) + idx = i + } else { + break + } + } + if idx != -1 { + if idx < len(bs.waitAckChain) { + bs.waitAckChain = bs.waitAckChain[idx+1:] + } + } + } else { //所有连接都已关闭 + return true + } + return false +} + +func (bs *BundleSession) CacheSendItem(sid, packetid int, logicNo uint32, data interface{}) { + ts := module.AppModule.GetCurrTimeNano() + bs.waitAckChain = append(bs.waitAckChain, &SendPendItem{ + sid: sid, + packetid: packetid, + logicNo: logicNo, + data: data, + ts: ts, + }) + bs.lastSndTs = ts +} + +func (bs *BundleSession) Send(packetid int, pack interface{}) { + if bs.currentSession != nil { + logicNo := atomic.AddUint32(&bs.sndLogicNo, 1) + if bs.currentSession.SendEx(packetid, logicNo, pack, false) { + bs.CacheSendItem(bs.currentSession.Id, packetid, logicNo, pack) + } + } +} + +type BundleMgr struct { + 
freeBundle []uint16 + bundles [math.MaxUint16]*BundleSession + bundlesMap map[string]uint16 + Debug bool +} + +// 主线程中调用 +func (bm *BundleMgr) AllocBundleId() uint16 { + last := len(bm.freeBundle) + if last > 0 { + id := bm.freeBundle[last-1] + bm.freeBundle = bm.freeBundle[:last-1] + return id + } + return 0 +} + +// 主线程中调用 +func (bm *BundleMgr) FreeBundleId(id uint16) { + if bm.Debug { + for i := 0; i < len(bm.freeBundle); i++ { + if bm.freeBundle[i] == id { + panic(fmt.Sprintf("BundleMgr.FreeBundleId found repeat id:%v", id)) + } + } + } + bm.freeBundle = append(bm.freeBundle, id) + delete(bm.bundlesMap, bm.bundles[id].bundleKey) + bm.bundles[id] = nil +} + +// 主线程中调用 +func (bm *BundleMgr) BindSession(bundleId uint16, s *netlib.Session) { + if bm.bundles[bundleId] == nil { + bm.bundles[bundleId] = &BundleSession{ + RTO: int64(time.Second), + } + } + bs := bm.bundles[bundleId] + if bs != nil { + s.GroupId = int(bundleId) + param := s.GetAttribute(srvlib.SessionAttributeClientSession) + if param != nil { + if sid, ok := param.(srvlib.SessionId); ok { + s.Sid = int64(srvlib.NewSessionIdEx(int32(sid.AreaId()), int32(sid.SrvType()), int32(sid.SrvId()), int32(bundleId))) + } + } + + bs.bundleSession = append(bs.bundleSession, s) + if bs.fastestSession == nil { + bs.fastestSession = s + } + if bs.currentSession == nil { + bs.currentSession = s + } + } +} + +func (bm *BundleMgr) GetBundleSession(bundleId uint16) *BundleSession { + return bm.bundles[bundleId] +} + +// 主线程中调用 +func (bm *BundleMgr) OnSessionAck(s *netlib.Session, logicNo uint32) { + if s != nil { + bs := bm.GetBundleSession(uint16(s.GroupId)) + if bs != nil { + if len(bs.waitAckChain) > 0 && bs.waitAckChain[0].logicNo == logicNo { + pendingItem := bs.waitAckChain[0] + bs.waitAckChain = bs.waitAckChain[1:] + bs.fastestSession = s + if len(bs.waitAckChain) == 0 { //切换响应更快的链接 + bs.currentSession = bs.fastestSession + } + bs.lastAckTs = module.AppModule.GetCurrTimeNano() + bs.RTT = bs.lastAckTs - 
pendingItem.ts + bs.RTO = bs.RTT * 5 + if bs.RTO > int64(time.Second) { //最长1秒 + bs.RTO = int64(time.Second) + } + } + } + } +} + +func (bm *BundleMgr) Broadcast(packetid int, pack interface{}) { + for _, bid := range bm.bundlesMap { + bs := bm.GetBundleSession(bid) + if bs != nil { + bs.Send(packetid, pack) + } + } +} + +func (bm *BundleMgr) ModuleName() string { + return "BundleMgr" +} + +func (bm *BundleMgr) Init() { + bm.freeBundle = make([]uint16, 0, math.MaxUint16) + for i := uint16(math.MaxUint16); i > 0; i-- { + bm.freeBundle = append(bm.freeBundle, i) + } + bm.bundlesMap = make(map[string]uint16) +} + +func (bm *BundleMgr) Update() { + ts := module.AppModule.GetCurrTimeNano() + for _, id := range bm.bundlesMap { + bs := bm.GetBundleSession(id) + if bs != nil { + if len(bs.waitAckChain) > 0 && ts-bs.waitAckChain[0].ts > bs.RTO { //重发 + s := bs.fastestSession + if s == bs.currentSession { + for _, ss := range bs.bundleSession { + if s != ss { + s = ss + break + } + } + } + if s != bs.currentSession { + bs.currentSession = s + chain := bs.waitAckChain + bs.waitAckChain = nil + for _, item := range chain { + if ts-item.ts > bs.RTO { + if bs.currentSession.SendEx(item.packetid, item.logicNo, item.data, false) { + bs.CacheSendItem(bs.currentSession.Id, item.packetid, item.logicNo, item.data) + } + } + } + } + } + } + } +} + +func (bm *BundleMgr) Shutdown() { + module.UnregisteModule(bm) +} + +func init() { + module.RegisteModule(BundleMgrSington, time.Millisecond*100, 0) +} diff --git a/mmo/gatesrv/clientsessionload.go b/mmo/gatesrv/clientsessionload.go new file mode 100644 index 0000000..84023fa --- /dev/null +++ b/mmo/gatesrv/clientsessionload.go @@ -0,0 +1,50 @@ +package main + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/mmo/protocol" + "mongo.games.com/goserver/srvlib" +) + +var ( + SessionHandlerClientLoadName = "handler-client-load" +) + +type SessionHandlerClientLoad struct { + 
netlib.BasicSessionHandler +} + +func (sfcl SessionHandlerClientLoad) GetName() string { + return SessionHandlerClientLoadName +} + +func (sfcl *SessionHandlerClientLoad) GetInterestOps() uint { + return 1< + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mmo/gatesrv/main.go b/mmo/gatesrv/main.go new file mode 100644 index 0000000..493cc9f --- /dev/null +++ b/mmo/gatesrv/main.go @@ -0,0 +1,16 @@ +package main + +import ( + _ "mongo.games.com/goserver/mmo" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/mmo/gatesrv/multicasthandler.go b/mmo/gatesrv/multicasthandler.go new file mode 100644 index 0000000..3698fed --- /dev/null +++ b/mmo/gatesrv/multicasthandler.go @@ -0,0 +1,75 @@ +package main + +import ( + "google.golang.org/protobuf/proto" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +var ( + MulticastMaker = &MulticastPacketFactory{} +) + +type MulticastPacketFactory struct { +} + +type MulticastHandler struct { +} + +func init() { + netlib.RegisterHandler(int(protocol.SrvlibPacketID_PACKET_SS_MULTICAST), &MulticastHandler{}) + netlib.RegisterFactory(int(protocol.SrvlibPacketID_PACKET_SS_MULTICAST), MulticastMaker) +} + +func (this *MulticastPacketFactory) CreatePacket() interface{} { + pack := &protocol.SSPacketMulticast{} + return pack +} + +func (this *MulticastPacketFactory) CreateMulticastPacket(packetid int, data interface{}, sis ...*protocol.MCSessionUnion) (proto.Message, error) { + pack := &protocol.SSPacketMulticast{ + Sessions: sis, + PacketId: int32(packetid), + } + if byteData, ok := data.([]byte); ok { + pack.Data = byteData + } else { + byteData, err := netlib.MarshalPacket(packetid, data) + if err == nil { + pack.Data 
= byteData + } else { + logger.Logger.Info("MulticastPacketFactory.CreateMulticastPacket err:", err) + return nil, err + } + } + return pack, nil +} + +func (this *MulticastHandler) Process(s *netlib.Session, packetid int, data interface{}) error { + if mp, ok := data.(*protocol.SSPacketMulticast); ok { + pd := mp.GetData() + sis := mp.GetSessions() + for _, si := range sis { + cs := si.GetMccs() + if cs != nil { + sid := srvlib.SessionId(cs.GetSId()) + bundleId := sid.SeqId() + bs := BundleMgrSington.GetBundleSession(uint16(bundleId)) + if bs != nil { + bs.Send(int(mp.GetPacketId()), pd) + } + } else { + ss := si.GetMcss() + if ss != nil { + ns := srvlib.ServerSessionMgrSington.GetSession(int(ss.GetSArea()), int(ss.GetSType()), int(ss.GetSId())) + if ns != nil { + ns.Send(int(mp.GetPacketId()), pd /*, s.GetSessionConfig().IsInnerLink*/) + } + } + } + } + } + return nil +} diff --git a/mmo/gatesrv/packetdispatchhandler.go b/mmo/gatesrv/packetdispatchhandler.go new file mode 100644 index 0000000..c91e650 --- /dev/null +++ b/mmo/gatesrv/packetdispatchhandler.go @@ -0,0 +1,55 @@ +package main + +// +//import ( +// "bytes" +// "sync/atomic" +// +// "google.golang.org/protobuf/proto" +// "games.jiexunjiayin.com/jxjyqp/protocol" +// "mongo.games.com/goserver/core/builtin/filter" +// "mongo.games.com/goserver/core/logger" +// "mongo.games.com/goserver/core/netlib" +//) +// +//func init() { +// netlib.RegisteErrorPacketHandlerCreator("packetdispatchhandler", func() netlib.ErrorPacketHandler { +// return netlib.ErrorPacketHandlerWrapper(func(s *netlib.Session, packetid int, logicNo uint32, data []byte) bool { +// if s.GetAttribute(filter.SessionAttributeAuth) == nil { +// logger.Logger.Trace("packetdispatchhandler session not auth! ") +// return false +// } +// +// bs := BundleMgrSington.GetBundleSession(uint16(s.GroupId)) +// if bs == nil { +// logger.Logger.Trace("packetdispatchhandler BundleSession is nil! 
") +// return false +// } +// +// if atomic.CompareAndSwapUint32(&bs.rcvLogicNo, logicNo-1, logicNo) { +// var ss *netlib.Session +// if packetid >= 2000 && packetid < 3000 { +// ss = bs.worldsrvSession +// } else { +// ss = bs.gamesrvSession +// } +// if ss == nil { +// logger.Logger.Trace("packetdispatchhandler redirect server session is nil ", packetid) +// return true +// } +// //must copy +// buf := bytes.NewBuffer(nil) +// buf.Write(data) +// pack := &protocol.SSTransmit{ +// SessionId: proto.Int64(s.Sid), +// PacketData: buf.Bytes(), +// } +// ss.Send(int(protocol.MmoPacketID_PACKET_SS_PACKET_TRANSMIT), pack) +// return true +// } +// +// //丢掉 +// return false +// }) +// }) +//} diff --git a/mmo/gen_go.bat b/mmo/gen_go.bat new file mode 100644 index 0000000..128e44b --- /dev/null +++ b/mmo/gen_go.bat @@ -0,0 +1,13 @@ +@echo off +set work_path=%cd% +set proto_path=%cd%\protocol +set protoc3=%cd%\..\bin\protoc-3.5.1-win32\bin\protoc.exe +set protoc-gen-go-plugin-path="%cd%\..\bin\protoc-gen-go.exe" + +cd %proto_path% + for %%b in (,*.proto) do ( + echo %%b + %protoc3% --plugin=protoc-gen-go=%protoc-gen-go-plugin-path% --go_out=. %%b + ) + cd .. 
+pause \ No newline at end of file diff --git a/mmo/imports.go b/mmo/imports.go new file mode 100644 index 0000000..ed43ea6 --- /dev/null +++ b/mmo/imports.go @@ -0,0 +1,11 @@ +package mmo + +import ( + _ "mongo.games.com/goserver/core/builtin/action" + _ "mongo.games.com/goserver/core/builtin/filter" + _ "mongo.games.com/goserver/core/cmdline" + _ "mongo.games.com/goserver/core/netlib" + _ "mongo.games.com/goserver/core/signal" + _ "mongo.games.com/goserver/srvlib/action" + _ "mongo.games.com/goserver/srvlib/handler" +) diff --git a/mmo/mgrsrv/config.json b/mmo/mgrsrv/config.json new file mode 100644 index 0000000..ee75f2e --- /dev/null +++ b/mmo/mgrsrv/config.json @@ -0,0 +1,80 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "ManagerServer", + "Type": 5, + "Id": 501, + "AreaID": 1, + "Banner": [ + "=================", + "manager server", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 501, + "Type": 5, + "AreaId": 1, + "Name": "ManagerService", + "Ip": "", + "Port": 5555, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe","srv-service-handler"] + } + ] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "cmdline": { + "SupportCmdLine": true + } +} \ No newline at end of file diff --git a/mmo/mgrsrv/doc.go b/mmo/mgrsrv/doc.go new file mode 100644 
index 0000000..f7a2363 --- /dev/null +++ b/mmo/mgrsrv/doc.go @@ -0,0 +1,5 @@ +package main + +// Responsibilities: +// 1:responsible for managing the relationship between the server group. +// 2:message forwarding. diff --git a/mmo/mgrsrv/logger.xml b/mmo/mgrsrv/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/mmo/mgrsrv/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mmo/mgrsrv/main.go b/mmo/mgrsrv/main.go new file mode 100644 index 0000000..493cc9f --- /dev/null +++ b/mmo/mgrsrv/main.go @@ -0,0 +1,16 @@ +package main + +import ( + _ "mongo.games.com/goserver/mmo" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/mmo/protocol/gateinfo.pb.go b/mmo/protocol/gateinfo.pb.go new file mode 100644 index 0000000..cb065a8 --- /dev/null +++ b/mmo/protocol/gateinfo.pb.go @@ -0,0 +1,313 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: gateinfo.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SCGateInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrvType int32 `protobuf:"varint,1,opt,name=SrvType,proto3" json:"SrvType,omitempty"` + SrvId int32 `protobuf:"varint,2,opt,name=SrvId,proto3" json:"SrvId,omitempty"` + AuthKey string `protobuf:"bytes,3,opt,name=AuthKey,proto3" json:"AuthKey,omitempty"` + Ip string `protobuf:"bytes,4,opt,name=Ip,proto3" json:"Ip,omitempty"` + Port int32 `protobuf:"varint,5,opt,name=Port,proto3" json:"Port,omitempty"` +} + +func (x *SCGateInfo) Reset() { + *x = SCGateInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_gateinfo_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SCGateInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SCGateInfo) ProtoMessage() {} + +func (x *SCGateInfo) ProtoReflect() protoreflect.Message { + mi := &file_gateinfo_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SCGateInfo.ProtoReflect.Descriptor instead. 
+func (*SCGateInfo) Descriptor() ([]byte, []int) { + return file_gateinfo_proto_rawDescGZIP(), []int{0} +} + +func (x *SCGateInfo) GetSrvType() int32 { + if x != nil { + return x.SrvType + } + return 0 +} + +func (x *SCGateInfo) GetSrvId() int32 { + if x != nil { + return x.SrvId + } + return 0 +} + +func (x *SCGateInfo) GetAuthKey() string { + if x != nil { + return x.AuthKey + } + return "" +} + +func (x *SCGateInfo) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +func (x *SCGateInfo) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +type CSSessionBundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` + Ts int64 `protobuf:"varint,2,opt,name=Ts,proto3" json:"Ts,omitempty"` +} + +func (x *CSSessionBundle) Reset() { + *x = CSSessionBundle{} + if protoimpl.UnsafeEnabled { + mi := &file_gateinfo_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CSSessionBundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CSSessionBundle) ProtoMessage() {} + +func (x *CSSessionBundle) ProtoReflect() protoreflect.Message { + mi := &file_gateinfo_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CSSessionBundle.ProtoReflect.Descriptor instead. 
+func (*CSSessionBundle) Descriptor() ([]byte, []int) { + return file_gateinfo_proto_rawDescGZIP(), []int{1} +} + +func (x *CSSessionBundle) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *CSSessionBundle) GetTs() int64 { + if x != nil { + return x.Ts + } + return 0 +} + +type CSSessionAck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LogicNo uint32 `protobuf:"varint,1,opt,name=LogicNo,proto3" json:"LogicNo,omitempty"` +} + +func (x *CSSessionAck) Reset() { + *x = CSSessionAck{} + if protoimpl.UnsafeEnabled { + mi := &file_gateinfo_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CSSessionAck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CSSessionAck) ProtoMessage() {} + +func (x *CSSessionAck) ProtoReflect() protoreflect.Message { + mi := &file_gateinfo_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CSSessionAck.ProtoReflect.Descriptor instead. 
+func (*CSSessionAck) Descriptor() ([]byte, []int) { + return file_gateinfo_proto_rawDescGZIP(), []int{2} +} + +func (x *CSSessionAck) GetLogicNo() uint32 { + if x != nil { + return x.LogicNo + } + return 0 +} + +var File_gateinfo_proto protoreflect.FileDescriptor + +var file_gateinfo_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x67, 0x61, 0x74, 0x65, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x7a, 0x0a, 0x0a, 0x53, 0x43, + 0x47, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x72, 0x76, 0x54, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x53, 0x72, 0x76, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x75, 0x74, 0x68, + 0x4b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x75, 0x74, 0x68, 0x4b, + 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x49, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x33, 0x0a, 0x0f, 0x43, 0x53, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x54, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x54, 0x73, 0x22, 0x28, 0x0a, 0x0c, 0x43, + 0x53, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x4c, + 0x6f, 0x67, 0x69, 0x63, 0x4e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x4c, 0x6f, + 0x67, 0x69, 0x63, 0x4e, 0x6f, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_gateinfo_proto_rawDescOnce 
sync.Once + file_gateinfo_proto_rawDescData = file_gateinfo_proto_rawDesc +) + +func file_gateinfo_proto_rawDescGZIP() []byte { + file_gateinfo_proto_rawDescOnce.Do(func() { + file_gateinfo_proto_rawDescData = protoimpl.X.CompressGZIP(file_gateinfo_proto_rawDescData) + }) + return file_gateinfo_proto_rawDescData +} + +var file_gateinfo_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_gateinfo_proto_goTypes = []interface{}{ + (*SCGateInfo)(nil), // 0: protocol.SCGateInfo + (*CSSessionBundle)(nil), // 1: protocol.CSSessionBundle + (*CSSessionAck)(nil), // 2: protocol.CSSessionAck +} +var file_gateinfo_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_gateinfo_proto_init() } +func file_gateinfo_proto_init() { + if File_gateinfo_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_gateinfo_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SCGateInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateinfo_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CSSessionBundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gateinfo_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CSSessionAck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_gateinfo_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_gateinfo_proto_goTypes, + DependencyIndexes: file_gateinfo_proto_depIdxs, + MessageInfos: file_gateinfo_proto_msgTypes, + }.Build() + File_gateinfo_proto = out.File + file_gateinfo_proto_rawDesc = nil + file_gateinfo_proto_goTypes = nil + file_gateinfo_proto_depIdxs = nil +} diff --git a/mmo/protocol/gateinfo.proto b/mmo/protocol/gateinfo.proto new file mode 100644 index 0000000..70a42da --- /dev/null +++ b/mmo/protocol/gateinfo.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + + +message SCGateInfo { + int32 SrvType = 1; + int32 SrvId = 2; + string AuthKey = 3; + string Ip = 4; + int32 Port = 5; +} + +message CSSessionBundle { + string Key = 1; + int64 Ts = 2; +} + +message CSSessionAck { + uint32 LogicNo = 1; +} \ No newline at end of file diff --git a/mmo/protocol/packetid.pb.go b/mmo/protocol/packetid.pb.go new file mode 100644 index 0000000..edae338 --- /dev/null +++ b/mmo/protocol/packetid.pb.go @@ -0,0 +1,147 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: packetid.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MmoPacketID int32 + +const ( + MmoPacketID_PACKET_GB_UNKNOW MmoPacketID = 0 + MmoPacketID_PACKET_GB_CUR_LOAD MmoPacketID = 1000 + MmoPacketID_PACKET_GB_STATE_SWITCH MmoPacketID = 1001 + MmoPacketID_PACKET_SC_GATEINFO MmoPacketID = 1002 + MmoPacketID_PACKET_CS_SESSIONBUNDLE MmoPacketID = 1003 + MmoPacketID_PACKET_CS_SESSIONACK MmoPacketID = 1004 +) + +// Enum value maps for MmoPacketID. +var ( + MmoPacketID_name = map[int32]string{ + 0: "PACKET_GB_UNKNOW", + 1000: "PACKET_GB_CUR_LOAD", + 1001: "PACKET_GB_STATE_SWITCH", + 1002: "PACKET_SC_GATEINFO", + 1003: "PACKET_CS_SESSIONBUNDLE", + 1004: "PACKET_CS_SESSIONACK", + } + MmoPacketID_value = map[string]int32{ + "PACKET_GB_UNKNOW": 0, + "PACKET_GB_CUR_LOAD": 1000, + "PACKET_GB_STATE_SWITCH": 1001, + "PACKET_SC_GATEINFO": 1002, + "PACKET_CS_SESSIONBUNDLE": 1003, + "PACKET_CS_SESSIONACK": 1004, + } +) + +func (x MmoPacketID) Enum() *MmoPacketID { + p := new(MmoPacketID) + *p = x + return p +} + +func (x MmoPacketID) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MmoPacketID) Descriptor() protoreflect.EnumDescriptor { + return file_packetid_proto_enumTypes[0].Descriptor() +} + +func (MmoPacketID) Type() protoreflect.EnumType { + return &file_packetid_proto_enumTypes[0] +} + +func (x MmoPacketID) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MmoPacketID.Descriptor instead. 
+func (MmoPacketID) EnumDescriptor() ([]byte, []int) { + return file_packetid_proto_rawDescGZIP(), []int{0} +} + +var File_packetid_proto protoreflect.FileDescriptor + +var file_packetid_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2a, 0xab, 0x01, 0x0a, 0x0b, 0x4d, + 0x6d, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x41, + 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x47, 0x42, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x10, 0x00, + 0x12, 0x17, 0x0a, 0x12, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x47, 0x42, 0x5f, 0x43, 0x55, + 0x52, 0x5f, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0xe8, 0x07, 0x12, 0x1b, 0x0a, 0x16, 0x50, 0x41, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x47, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x57, 0x49, + 0x54, 0x43, 0x48, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x12, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, + 0x5f, 0x53, 0x43, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0xea, 0x07, 0x12, + 0x1c, 0x0a, 0x17, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x43, 0x53, 0x5f, 0x53, 0x45, 0x53, + 0x53, 0x49, 0x4f, 0x4e, 0x42, 0x55, 0x4e, 0x44, 0x4c, 0x45, 0x10, 0xeb, 0x07, 0x12, 0x19, 0x0a, + 0x14, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x43, 0x53, 0x5f, 0x53, 0x45, 0x53, 0x53, 0x49, + 0x4f, 0x4e, 0x41, 0x43, 0x4b, 0x10, 0xec, 0x07, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_packetid_proto_rawDescOnce sync.Once + file_packetid_proto_rawDescData = file_packetid_proto_rawDesc +) + +func file_packetid_proto_rawDescGZIP() []byte { + file_packetid_proto_rawDescOnce.Do(func() { + file_packetid_proto_rawDescData = protoimpl.X.CompressGZIP(file_packetid_proto_rawDescData) + }) + return file_packetid_proto_rawDescData +} + +var file_packetid_proto_enumTypes = make([]protoimpl.EnumInfo, 
1) +var file_packetid_proto_goTypes = []interface{}{ + (MmoPacketID)(0), // 0: protocol.MmoPacketID +} +var file_packetid_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_packetid_proto_init() } +func file_packetid_proto_init() { + if File_packetid_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_packetid_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_packetid_proto_goTypes, + DependencyIndexes: file_packetid_proto_depIdxs, + EnumInfos: file_packetid_proto_enumTypes, + }.Build() + File_packetid_proto = out.File + file_packetid_proto_rawDesc = nil + file_packetid_proto_goTypes = nil + file_packetid_proto_depIdxs = nil +} diff --git a/mmo/protocol/packetid.proto b/mmo/protocol/packetid.proto new file mode 100644 index 0000000..2321207 --- /dev/null +++ b/mmo/protocol/packetid.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +enum MmoPacketID { + PACKET_GB_UNKNOW = 0; + PACKET_GB_CUR_LOAD = 1000; + PACKET_GB_STATE_SWITCH = 1001; + PACKET_SC_GATEINFO = 1002; + PACKET_CS_SESSIONBUNDLE = 1003; + PACKET_CS_SESSIONACK = 1004; +} \ No newline at end of file diff --git a/mmo/protocol/serverload.pb.go b/mmo/protocol/serverload.pb.go new file mode 100644 index 0000000..7fb3ecc --- /dev/null +++ b/mmo/protocol/serverload.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: serverload.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ServerLoad struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrvType int32 `protobuf:"varint,1,opt,name=SrvType,proto3" json:"SrvType,omitempty"` + SrvId int32 `protobuf:"varint,2,opt,name=SrvId,proto3" json:"SrvId,omitempty"` + CurLoad int32 `protobuf:"varint,3,opt,name=CurLoad,proto3" json:"CurLoad,omitempty"` +} + +func (x *ServerLoad) Reset() { + *x = ServerLoad{} + if protoimpl.UnsafeEnabled { + mi := &file_serverload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerLoad) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerLoad) ProtoMessage() {} + +func (x *ServerLoad) ProtoReflect() protoreflect.Message { + mi := &file_serverload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerLoad.ProtoReflect.Descriptor instead. 
+func (*ServerLoad) Descriptor() ([]byte, []int) { + return file_serverload_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerLoad) GetSrvType() int32 { + if x != nil { + return x.SrvType + } + return 0 +} + +func (x *ServerLoad) GetSrvId() int32 { + if x != nil { + return x.SrvId + } + return 0 +} + +func (x *ServerLoad) GetCurLoad() int32 { + if x != nil { + return x.CurLoad + } + return 0 +} + +type ServerStateSwitch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrvType int32 `protobuf:"varint,1,opt,name=SrvType,proto3" json:"SrvType,omitempty"` + SrvId int32 `protobuf:"varint,2,opt,name=SrvId,proto3" json:"SrvId,omitempty"` +} + +func (x *ServerStateSwitch) Reset() { + *x = ServerStateSwitch{} + if protoimpl.UnsafeEnabled { + mi := &file_serverload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerStateSwitch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerStateSwitch) ProtoMessage() {} + +func (x *ServerStateSwitch) ProtoReflect() protoreflect.Message { + mi := &file_serverload_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerStateSwitch.ProtoReflect.Descriptor instead. 
+func (*ServerStateSwitch) Descriptor() ([]byte, []int) { + return file_serverload_proto_rawDescGZIP(), []int{1} +} + +func (x *ServerStateSwitch) GetSrvType() int32 { + if x != nil { + return x.SrvType + } + return 0 +} + +func (x *ServerStateSwitch) GetSrvId() int32 { + if x != nil { + return x.SrvId + } + return 0 +} + +var File_serverload_proto protoreflect.FileDescriptor + +var file_serverload_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x56, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x72, + 0x76, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x53, 0x72, 0x76, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x75, + 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x43, 0x75, 0x72, + 0x4c, 0x6f, 0x61, 0x64, 0x22, 0x43, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x72, 0x76, + 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x53, 0x72, 0x76, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_serverload_proto_rawDescOnce sync.Once + file_serverload_proto_rawDescData = file_serverload_proto_rawDesc +) + +func file_serverload_proto_rawDescGZIP() []byte { + file_serverload_proto_rawDescOnce.Do(func() { + file_serverload_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_serverload_proto_rawDescData) + }) + return file_serverload_proto_rawDescData +} + +var file_serverload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_serverload_proto_goTypes = []interface{}{ + (*ServerLoad)(nil), // 0: protocol.ServerLoad + (*ServerStateSwitch)(nil), // 1: protocol.ServerStateSwitch +} +var file_serverload_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_serverload_proto_init() } +func file_serverload_proto_init() { + if File_serverload_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_serverload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerLoad); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_serverload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerStateSwitch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_serverload_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_serverload_proto_goTypes, + DependencyIndexes: file_serverload_proto_depIdxs, + MessageInfos: file_serverload_proto_msgTypes, + }.Build() + File_serverload_proto = out.File + file_serverload_proto_rawDesc = nil + file_serverload_proto_goTypes = nil + file_serverload_proto_depIdxs = nil +} diff --git a/mmo/protocol/serverload.proto b/mmo/protocol/serverload.proto new file mode 100644 index 
0000000..9865610 --- /dev/null +++ b/mmo/protocol/serverload.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message ServerLoad { + int32 SrvType = 1; + int32 SrvId = 2; + int32 CurLoad = 3; +} + +message ServerStateSwitch { + int32 SrvType = 1; + int32 SrvId = 2; +} \ No newline at end of file diff --git a/mmo/startup.bat b/mmo/startup.bat new file mode 100644 index 0000000..055dd40 --- /dev/null +++ b/mmo/startup.bat @@ -0,0 +1,12 @@ +cd accountsrv +start accountsrv +cd ../balancesrv +start balancesrv +cd ../gamesrv +start gamesrv +cd ../gatesrv +start gatesrv +cd ../mgrsrv +start mgrsrv +cd ../worldsrv +start worldsrv \ No newline at end of file diff --git a/mmo/worldsrv/config.json b/mmo/worldsrv/config.json new file mode 100644 index 0000000..4c780f6 --- /dev/null +++ b/mmo/worldsrv/config.json @@ -0,0 +1,103 @@ +{ + "netlib": { + "SrvInfo":{ + "Name": "WorldServer", + "Type": 6, + "Id": 601, + "AreaID": 1, + "Banner": [ + "=================", + "world server", + "=================" + ] + }, + + "IoServices": [ + { + "Id": 601, + "Type": 6, + "AreaId": 1, + "Name": "WorldService", + "Ip": "127.0.0.1", + "Port": 6001, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": ["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe"] + }, + { + "Id": 501, + "Type": 5, + "AreaId": 1, + "Name": "ManagerService", + "Ip": "127.0.0.1", + "Port": 5555, + "MaxDone": 20, + "MaxPend": 20, + "MaxPacket": 65535, + "MaxConn": 10000, + "RcvBuff": 8192, + "SndBuff": 8192, + "WriteTimeout": 30, + "ReadTimeout": 30, + "IsInnerLink": true, + "NoDelay": true, + "IsClient": true, + "SupportFragment": true, + "AuthKey": "1234567890", + "FilterChain": 
["session-filter-auth","session-filter-keepalive"], + "HandlerChain": ["session-srv-registe","srv-service-handler"] + } + ] + }, + + "module": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "executor": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + }, + "Worker": { + "WorkerCnt": 8, + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 0 + } + } + }, + + "timer": { + "Options": { + "QueueBacklog": 1024, + "MaxDone": 1024, + "Interval": 100 + } + }, + + "core": { + "MaxProcs": 4 + }, + + "cmdline": { + "SupportCmdLine": true + } +} \ No newline at end of file diff --git a/mmo/worldsrv/doc.go b/mmo/worldsrv/doc.go new file mode 100644 index 0000000..0af1cf0 --- /dev/null +++ b/mmo/worldsrv/doc.go @@ -0,0 +1,6 @@ +package main + +// Game world server(The whole world only). +// Responsibilities: +// 1:Responsible for the overall business in the game. +// For example: social relations diff --git a/mmo/worldsrv/logger.xml b/mmo/worldsrv/logger.xml new file mode 100644 index 0000000..cb66204 --- /dev/null +++ b/mmo/worldsrv/logger.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mmo/worldsrv/main.go b/mmo/worldsrv/main.go new file mode 100644 index 0000000..493cc9f --- /dev/null +++ b/mmo/worldsrv/main.go @@ -0,0 +1,16 @@ +package main + +import ( + _ "mongo.games.com/goserver/mmo" + + "mongo.games.com/goserver/core" + "mongo.games.com/goserver/core/module" +) + +func main() { + defer core.ClosePackages() + core.LoadPackages("config.json") + + waiter := module.Start() + waiter.Wait("main") +} diff --git a/srvlib/action/redirecthandler.go b/srvlib/action/redirecthandler.go new file mode 100644 index 0000000..42184fb --- /dev/null +++ b/srvlib/action/redirecthandler.go @@ -0,0 +1,44 @@ +package action + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + 
"mongo.games.com/goserver/srvlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +type PacketRedirectPacketFactory struct { +} + +type PacketRedirectHandler struct { +} + +func init() { + netlib.RegisterHandler(int(protocol.SrvlibPacketID_PACKET_SS_REDIRECT), &PacketRedirectHandler{}) + netlib.RegisterFactory(int(protocol.SrvlibPacketID_PACKET_SS_REDIRECT), &PacketRedirectPacketFactory{}) +} + +func (this *PacketRedirectPacketFactory) CreatePacket() interface{} { + pack := &protocol.SSPacketRedirect{} + return pack +} + +func (this *PacketRedirectHandler) Process(s *netlib.Session, packetid int, data interface{}) error { + logger.Logger.Trace("PacketRedirectHandler.Process") + if pr, ok := data.(*protocol.SSPacketRedirect); ok { + packid, pack, err := netlib.UnmarshalPacket(pr.GetData()) + if err != nil { + return err + } + h := srvlib.GetHandler(packid) + if h != nil { + return h.Process(s, packid, pack, pr.GetClientSid(), pr.GetSrvRoutes()) + } else { + nh := netlib.GetHandler(packid) + if nh != nil { + return nh.Process(s, packid, pack) + } + } + } + return nil +} diff --git a/srvlib/action/transithandler.go b/srvlib/action/transithandler.go new file mode 100644 index 0000000..dd20a89 --- /dev/null +++ b/srvlib/action/transithandler.go @@ -0,0 +1,35 @@ +package action + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +type PacketTransitPacketFactory struct { +} + +type PacketTransitHandler struct { +} + +func init() { + netlib.RegisterHandler(int(protocol.SrvlibPacketID_PACKET_SS_TRANSIT), &PacketTransitHandler{}) + netlib.RegisterFactory(int(protocol.SrvlibPacketID_PACKET_SS_TRANSIT), &PacketTransitPacketFactory{}) +} + +func (this *PacketTransitPacketFactory) CreatePacket() interface{} { + pack := &protocol.SSPacketTransit{} + return pack +} + +func (this *PacketTransitHandler) Process(s *netlib.Session, packetid int, data 
interface{}) error { + logger.Logger.Trace("PacketTransitHandler.Process") + if pr, ok := data.(*protocol.SSPacketTransit); ok { + targetS := srvlib.ServerSessionMgrSington.GetSession(int(pr.GetSArea()), int(pr.GetSType()), int(pr.GetSId())) + if targetS != nil { + targetS.Send(int(pr.GetPacketId()), pr.GetData()) + } + } + return nil +} diff --git a/srvlib/clientsessionmgr.go b/srvlib/clientsessionmgr.go new file mode 100644 index 0000000..3e3cad7 --- /dev/null +++ b/srvlib/clientsessionmgr.go @@ -0,0 +1,70 @@ +package srvlib + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" +) + +/* + 所有连接(一般是玩家客户端连接) + sid->session +*/ + +var ( + SessionAttributeClientSession = &ClientSessionMgr{} + ClientSessionMgrSington = &ClientSessionMgr{sessions: make(map[int64]*netlib.Session)} +) + +type ClientSessionMgr struct { + sessions map[int64]*netlib.Session //keys=>sessionid +} + +func (csm *ClientSessionMgr) RegisteSession(s *netlib.Session) bool { + attr := s.GetAttribute(SessionAttributeClientSession) + if attr == nil { + sid := NewSessionId(s) + s.SetAttribute(SessionAttributeClientSession, sid) + csm.sessions[sid.Get()] = s + logger.Logger.Tracef("ClientSessionMgr(%p).RegisteSession client session %v registe", csm, sid.Get()) + } + return true +} + +func (csm *ClientSessionMgr) UnregisteSession(s *netlib.Session) bool { + attr := s.GetAttribute(SessionAttributeClientSession) + if attr != nil { + if sid, ok := attr.(SessionId); ok { + delete(csm.sessions, sid.Get()) + logger.Logger.Tracef("ClientSessionMgr(%p).UnregisteSession client session %v unregiste", csm, sid.Get()) + } + } + return true +} + +func (csm *ClientSessionMgr) GetSession(srvId int64) *netlib.Session { + if s, exist := csm.sessions[srvId]; exist { + return s + } + return nil +} + +func (csm *ClientSessionMgr) GetSessions() map[int64]*netlib.Session { + return csm.sessions +} + +func (csm *ClientSessionMgr) Broadcast(packetid int, pack interface{}) { + for _, s := 
range csm.sessions { + s.Send(packetid, pack) + } +} + +func (csm *ClientSessionMgr) Count() int { + return len(csm.sessions) +} + +func (csm *ClientSessionMgr) CloseAll() { + logger.Logger.Tracef("ClientSessionMgr(%p).CloseAll!!!!!!!!!!!! session's cnt=%v", csm, len(csm.sessions)) + for _, s := range csm.sessions { + s.Close() + } +} diff --git a/srvlib/const.go b/srvlib/const.go new file mode 100644 index 0000000..fa16b57 --- /dev/null +++ b/srvlib/const.go @@ -0,0 +1,27 @@ +package srvlib + +const ( + UnknowServiceType int = 0 + ClientServiceType = 1 + BalanceServiceType = 2 + AccountServiceType = 3 + GateServiceType = 4 + ManagerServiceType = 5 + WorldServiceType = 6 + GameServiceType = 7 + RankServiceType = 8 + MaxServiceType = 9 +) + +const ( + UnknowServerType int = 0 + _ + BalanceServerType = 2 + AccountServerType = 3 + GateServerType = 4 + ManagerServerType = 5 + WorldServerType = 6 + GameServerType = 7 + RankServerType = 8 + MaxServerType = 9 +) diff --git a/srvlib/gen_go.bat b/srvlib/gen_go.bat new file mode 100644 index 0000000..128e44b --- /dev/null +++ b/srvlib/gen_go.bat @@ -0,0 +1,13 @@ +@echo off +set work_path=%cd% +set proto_path=%cd%\protocol +set protoc3=%cd%\..\bin\protoc-3.5.1-win32\bin\protoc.exe +set protoc-gen-go-plugin-path="%cd%\..\bin\protoc-gen-go.exe" + +cd %proto_path% + for %%b in (,*.proto) do ( + echo %%b + %protoc3% --plugin=protoc-gen-go=%protoc-gen-go-plugin-path% --go_out=. %%b + ) + cd .. 
+pause \ No newline at end of file diff --git a/srvlib/handler.go b/srvlib/handler.go new file mode 100644 index 0000000..feb89bd --- /dev/null +++ b/srvlib/handler.go @@ -0,0 +1,49 @@ +package srvlib + +import ( + "fmt" + "reflect" + + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +var handlers = make(map[int]Handler) + +type Handler interface { + Process(s *netlib.Session, packetid int, data interface{}, sid int64, routes []*protocol.SrvInfo) error +} + +type HandlerWrapper func(s *netlib.Session, packetid int, data interface{}, sid int64, routes []*protocol.SrvInfo) error + +func (hw HandlerWrapper) Process(s *netlib.Session, packetid int, data interface{}, sid int64, routes []*protocol.SrvInfo) error { + return hw(s, packetid, data, sid, routes) +} + +func RegisterHandler(packetId int, h Handler) { + if _, ok := handlers[packetId]; ok { + panic(fmt.Sprintf("repeate register handler: %v Handler type=%v", packetId, reflect.TypeOf(h))) + } + + handlers[packetId] = h +} + +func Register1ToMHandler(h Handler, packetIds ...int) { + for _, packetId := range packetIds { + RegisterHandler(packetId, h) + } +} + +func RegisterRangeHandler(start, end int, h Handler) { + for ; start <= end; start++ { + RegisterHandler(start, h) + } +} + +func GetHandler(packetId int) Handler { + if h, ok := handlers[packetId]; ok { + return h + } + + return nil +} diff --git a/srvlib/handler/clientsessionregiste.go b/srvlib/handler/clientsessionregiste.go new file mode 100644 index 0000000..606df5c --- /dev/null +++ b/srvlib/handler/clientsessionregiste.go @@ -0,0 +1,44 @@ +package handler + +import ( + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib" +) + +var ( + SessionHandlerClientRegisteName = "session-client-registe" +) + +type SessionHandlerClientRegiste struct { +} + +func (sfcr SessionHandlerClientRegiste) GetName() string { + return SessionHandlerClientRegisteName +} + +func (sfl *SessionHandlerClientRegiste) 
GetInterestOps() uint { + return 1<", sc.Name) + /*报告自己的监听信息*/ + srvlib.ServiceMgr.ReportService(s) + } else { + // 这里标记只有监听端才会触发自动连接 + s.SetAttribute(srvlib.SessionAttributeServiceFlag, 1) + } +} + +func (this *SessionHandlerServiceRegiste) OnSessionClosed(s *netlib.Session) { + sc := s.GetSessionConfig() + if !sc.IsClient { + logger.Logger.Warn("SessionHandlerServiceRegiste:OnSessionClosed ClearServiceBySession") + srvlib.ServiceMgr.ClearServiceBySession(s) + } +} + +func (this *SessionHandlerServiceRegiste) OnSessionIdle(s *netlib.Session) { +} + +func (this *SessionHandlerServiceRegiste) OnPacketReceived(s *netlib.Session, packetid int, logicNo uint32, packet interface{}) { +} + +func (this *SessionHandlerServiceRegiste) OnPacketSent(s *netlib.Session, packetid int, logicNo uint32, data []byte) { +} + +func init() { + netlib.RegisteSessionHandlerCreator(ServiceHandlerServiceRegisteName, func() netlib.SessionHandler { + return &SessionHandlerServiceRegiste{} + }) +} diff --git a/srvlib/handler/serversessionregiste.go b/srvlib/handler/serversessionregiste.go new file mode 100644 index 0000000..c9f6469 --- /dev/null +++ b/srvlib/handler/serversessionregiste.go @@ -0,0 +1,70 @@ +package handler + +import ( + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +// 服务信息注册,将自己的服务信息发送给对方 + +var ( + SessionHandlerSrvRegisteName = "session-srv-registe" +) + +type SessionHandlerSrvRegiste struct { +} + +func (sfl SessionHandlerSrvRegiste) GetName() string { + return SessionHandlerSrvRegisteName +} + +func (sfl *SessionHandlerSrvRegiste) GetInterestOps() uint { + return 1< protocol.BCSessionUnion + 2, // 1: protocol.BCSessionUnion.Bccs:type_name -> protocol.BCClientSession + 3, // 2: protocol.BCSessionUnion.Bcss:type_name -> protocol.BCServerSession + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // 
[3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_broadcast_proto_init() } +func file_broadcast_proto_init() { + if File_broadcast_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_broadcast_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSPacketBroadcast); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_broadcast_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BCSessionUnion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_broadcast_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BCClientSession); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_broadcast_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BCServerSession); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_broadcast_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_broadcast_proto_goTypes, + DependencyIndexes: file_broadcast_proto_depIdxs, + MessageInfos: file_broadcast_proto_msgTypes, + }.Build() + File_broadcast_proto = out.File + file_broadcast_proto_rawDesc = nil + file_broadcast_proto_goTypes = nil + file_broadcast_proto_depIdxs = nil +} diff --git a/srvlib/protocol/broadcast.proto b/srvlib/protocol/broadcast.proto new file mode 100644 index 
0000000..4279784 --- /dev/null +++ b/srvlib/protocol/broadcast.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSPacketBroadcast { + BCSessionUnion SessParam = 1; + int32 PacketId = 2; + bytes Data = 3; +} + +message BCSessionUnion { + BCClientSession Bccs = 1; + BCServerSession Bcss = 2; +} + +message BCClientSession { + bool Dummy = 1; +} + +message BCServerSession { + int32 SArea = 1; + int32 SType = 2; +} \ No newline at end of file diff --git a/srvlib/protocol/multicast.pb.go b/srvlib/protocol/multicast.pb.go new file mode 100644 index 0000000..7f1a8e5 --- /dev/null +++ b/srvlib/protocol/multicast.pb.go @@ -0,0 +1,385 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: multicast.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SSPacketMulticast struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sessions []*MCSessionUnion `protobuf:"bytes,1,rep,name=Sessions,proto3" json:"Sessions,omitempty"` + PacketId int32 `protobuf:"varint,2,opt,name=PacketId,proto3" json:"PacketId,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` +} + +func (x *SSPacketMulticast) Reset() { + *x = SSPacketMulticast{} + if protoimpl.UnsafeEnabled { + mi := &file_multicast_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSPacketMulticast) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSPacketMulticast) ProtoMessage() {} + +func (x *SSPacketMulticast) ProtoReflect() protoreflect.Message { + mi := &file_multicast_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSPacketMulticast.ProtoReflect.Descriptor instead. 
+func (*SSPacketMulticast) Descriptor() ([]byte, []int) { + return file_multicast_proto_rawDescGZIP(), []int{0} +} + +func (x *SSPacketMulticast) GetSessions() []*MCSessionUnion { + if x != nil { + return x.Sessions + } + return nil +} + +func (x *SSPacketMulticast) GetPacketId() int32 { + if x != nil { + return x.PacketId + } + return 0 +} + +func (x *SSPacketMulticast) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type MCSessionUnion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mccs *MCClientSession `protobuf:"bytes,1,opt,name=Mccs,proto3" json:"Mccs,omitempty"` + Mcss *MCServerSession `protobuf:"bytes,2,opt,name=Mcss,proto3" json:"Mcss,omitempty"` +} + +func (x *MCSessionUnion) Reset() { + *x = MCSessionUnion{} + if protoimpl.UnsafeEnabled { + mi := &file_multicast_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MCSessionUnion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MCSessionUnion) ProtoMessage() {} + +func (x *MCSessionUnion) ProtoReflect() protoreflect.Message { + mi := &file_multicast_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MCSessionUnion.ProtoReflect.Descriptor instead. 
+func (*MCSessionUnion) Descriptor() ([]byte, []int) { + return file_multicast_proto_rawDescGZIP(), []int{1} +} + +func (x *MCSessionUnion) GetMccs() *MCClientSession { + if x != nil { + return x.Mccs + } + return nil +} + +func (x *MCSessionUnion) GetMcss() *MCServerSession { + if x != nil { + return x.Mcss + } + return nil +} + +type MCClientSession struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SId int64 `protobuf:"varint,1,opt,name=SId,proto3" json:"SId,omitempty"` +} + +func (x *MCClientSession) Reset() { + *x = MCClientSession{} + if protoimpl.UnsafeEnabled { + mi := &file_multicast_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MCClientSession) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MCClientSession) ProtoMessage() {} + +func (x *MCClientSession) ProtoReflect() protoreflect.Message { + mi := &file_multicast_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MCClientSession.ProtoReflect.Descriptor instead. 
+func (*MCClientSession) Descriptor() ([]byte, []int) { + return file_multicast_proto_rawDescGZIP(), []int{2} +} + +func (x *MCClientSession) GetSId() int64 { + if x != nil { + return x.SId + } + return 0 +} + +type MCServerSession struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SArea int32 `protobuf:"varint,1,opt,name=SArea,proto3" json:"SArea,omitempty"` + SType int32 `protobuf:"varint,2,opt,name=SType,proto3" json:"SType,omitempty"` + SId int32 `protobuf:"varint,3,opt,name=SId,proto3" json:"SId,omitempty"` +} + +func (x *MCServerSession) Reset() { + *x = MCServerSession{} + if protoimpl.UnsafeEnabled { + mi := &file_multicast_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MCServerSession) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MCServerSession) ProtoMessage() {} + +func (x *MCServerSession) ProtoReflect() protoreflect.Message { + mi := &file_multicast_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MCServerSession.ProtoReflect.Descriptor instead. 
+func (*MCServerSession) Descriptor() ([]byte, []int) { + return file_multicast_proto_rawDescGZIP(), []int{3} +} + +func (x *MCServerSession) GetSArea() int32 { + if x != nil { + return x.SArea + } + return 0 +} + +func (x *MCServerSession) GetSType() int32 { + if x != nil { + return x.SType + } + return 0 +} + +func (x *MCServerSession) GetSId() int32 { + if x != nil { + return x.SId + } + return 0 +} + +var File_multicast_proto protoreflect.FileDescriptor + +var file_multicast_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x63, 0x61, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x79, 0x0a, 0x11, 0x53, + 0x53, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x63, 0x61, 0x73, 0x74, + 0x12, 0x34, 0x0a, 0x08, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x4d, 0x43, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, + 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, + 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x6e, 0x0a, 0x0e, 0x4d, 0x43, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x63, 0x63, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x2e, 0x4d, 0x43, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x04, 0x4d, 0x63, 0x63, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x63, 0x73, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x2e, 0x4d, 0x43, 
0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x04, 0x4d, 0x63, 0x73, 0x73, 0x22, 0x23, 0x0a, 0x0f, 0x4d, 0x43, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x49, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x53, 0x49, 0x64, 0x22, 0x4f, 0x0a, 0x0f, 0x4d, + 0x43, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x53, 0x41, 0x72, 0x65, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x53, + 0x41, 0x72, 0x65, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x49, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x53, 0x49, 0x64, 0x42, 0x0c, 0x5a, 0x0a, + 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_multicast_proto_rawDescOnce sync.Once + file_multicast_proto_rawDescData = file_multicast_proto_rawDesc +) + +func file_multicast_proto_rawDescGZIP() []byte { + file_multicast_proto_rawDescOnce.Do(func() { + file_multicast_proto_rawDescData = protoimpl.X.CompressGZIP(file_multicast_proto_rawDescData) + }) + return file_multicast_proto_rawDescData +} + +var file_multicast_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_multicast_proto_goTypes = []interface{}{ + (*SSPacketMulticast)(nil), // 0: protocol.SSPacketMulticast + (*MCSessionUnion)(nil), // 1: protocol.MCSessionUnion + (*MCClientSession)(nil), // 2: protocol.MCClientSession + (*MCServerSession)(nil), // 3: protocol.MCServerSession +} +var file_multicast_proto_depIdxs = []int32{ + 1, // 0: protocol.SSPacketMulticast.Sessions:type_name -> protocol.MCSessionUnion + 2, // 1: protocol.MCSessionUnion.Mccs:type_name -> protocol.MCClientSession + 3, // 2: protocol.MCSessionUnion.Mcss:type_name -> protocol.MCServerSession + 3, // 
[3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_multicast_proto_init() } +func file_multicast_proto_init() { + if File_multicast_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_multicast_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSPacketMulticast); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_multicast_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MCSessionUnion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_multicast_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MCClientSession); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_multicast_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MCServerSession); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_multicast_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_multicast_proto_goTypes, + DependencyIndexes: file_multicast_proto_depIdxs, + MessageInfos: file_multicast_proto_msgTypes, + }.Build() + File_multicast_proto = out.File + file_multicast_proto_rawDesc = nil + file_multicast_proto_goTypes = nil + file_multicast_proto_depIdxs = nil +} 
diff --git a/srvlib/protocol/multicast.proto b/srvlib/protocol/multicast.proto new file mode 100644 index 0000000..b349938 --- /dev/null +++ b/srvlib/protocol/multicast.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSPacketMulticast { + repeated MCSessionUnion Sessions = 1; + int32 PacketId = 2; + bytes Data = 3; +} + +message MCSessionUnion { + MCClientSession Mccs = 1; + MCServerSession Mcss = 2; +} + +message MCClientSession { + int64 SId = 1; +} + +message MCServerSession { + int32 SArea = 1; + int32 SType = 2; + int32 SId = 3; +} \ No newline at end of file diff --git a/srvlib/protocol/redirect.pb.go b/srvlib/protocol/redirect.pb.go new file mode 100644 index 0000000..5af258b --- /dev/null +++ b/srvlib/protocol/redirect.pb.go @@ -0,0 +1,254 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: redirect.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SSPacketRedirect struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientSid int64 `protobuf:"varint,1,opt,name=ClientSid,proto3" json:"ClientSid,omitempty"` + SrvRoutes []*SrvInfo `protobuf:"bytes,2,rep,name=SrvRoutes,proto3" json:"SrvRoutes,omitempty"` + PacketId int32 `protobuf:"varint,3,opt,name=PacketId,proto3" json:"PacketId,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"` +} + +func (x *SSPacketRedirect) Reset() { + *x = SSPacketRedirect{} + if protoimpl.UnsafeEnabled { + mi := &file_redirect_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSPacketRedirect) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSPacketRedirect) ProtoMessage() {} + +func (x *SSPacketRedirect) ProtoReflect() protoreflect.Message { + mi := &file_redirect_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSPacketRedirect.ProtoReflect.Descriptor instead. 
+func (*SSPacketRedirect) Descriptor() ([]byte, []int) { + return file_redirect_proto_rawDescGZIP(), []int{0} +} + +func (x *SSPacketRedirect) GetClientSid() int64 { + if x != nil { + return x.ClientSid + } + return 0 +} + +func (x *SSPacketRedirect) GetSrvRoutes() []*SrvInfo { + if x != nil { + return x.SrvRoutes + } + return nil +} + +func (x *SSPacketRedirect) GetPacketId() int32 { + if x != nil { + return x.PacketId + } + return 0 +} + +func (x *SSPacketRedirect) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type SrvInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SArea int32 `protobuf:"varint,1,opt,name=SArea,proto3" json:"SArea,omitempty"` + SType int32 `protobuf:"varint,2,opt,name=SType,proto3" json:"SType,omitempty"` + SId int32 `protobuf:"varint,3,opt,name=SId,proto3" json:"SId,omitempty"` +} + +func (x *SrvInfo) Reset() { + *x = SrvInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_redirect_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SrvInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SrvInfo) ProtoMessage() {} + +func (x *SrvInfo) ProtoReflect() protoreflect.Message { + mi := &file_redirect_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SrvInfo.ProtoReflect.Descriptor instead. 
+func (*SrvInfo) Descriptor() ([]byte, []int) { + return file_redirect_proto_rawDescGZIP(), []int{1} +} + +func (x *SrvInfo) GetSArea() int32 { + if x != nil { + return x.SArea + } + return 0 +} + +func (x *SrvInfo) GetSType() int32 { + if x != nil { + return x.SType + } + return 0 +} + +func (x *SrvInfo) GetSId() int32 { + if x != nil { + return x.SId + } + return 0 +} + +var File_redirect_proto protoreflect.FileDescriptor + +var file_redirect_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x91, 0x01, 0x0a, 0x10, 0x53, + 0x53, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x12, 0x2f, 0x0a, + 0x09, 0x53, 0x72, 0x76, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x53, 0x72, 0x76, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x53, 0x72, 0x76, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, + 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x47, + 0x0a, 0x07, 0x53, 0x72, 0x76, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x41, 0x72, + 0x65, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x53, 0x41, 0x72, 0x65, 0x61, 0x12, + 0x14, 0x0a, 0x05, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x53, 0x49, 0x64, 0x42, 0x0c, 
0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_redirect_proto_rawDescOnce sync.Once + file_redirect_proto_rawDescData = file_redirect_proto_rawDesc +) + +func file_redirect_proto_rawDescGZIP() []byte { + file_redirect_proto_rawDescOnce.Do(func() { + file_redirect_proto_rawDescData = protoimpl.X.CompressGZIP(file_redirect_proto_rawDescData) + }) + return file_redirect_proto_rawDescData +} + +var file_redirect_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_redirect_proto_goTypes = []interface{}{ + (*SSPacketRedirect)(nil), // 0: protocol.SSPacketRedirect + (*SrvInfo)(nil), // 1: protocol.SrvInfo +} +var file_redirect_proto_depIdxs = []int32{ + 1, // 0: protocol.SSPacketRedirect.SrvRoutes:type_name -> protocol.SrvInfo + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_redirect_proto_init() } +func file_redirect_proto_init() { + if File_redirect_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_redirect_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSPacketRedirect); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_redirect_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SrvInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_redirect_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_redirect_proto_goTypes, + DependencyIndexes: file_redirect_proto_depIdxs, + MessageInfos: file_redirect_proto_msgTypes, + }.Build() + File_redirect_proto = out.File + file_redirect_proto_rawDesc = nil + file_redirect_proto_goTypes = nil + file_redirect_proto_depIdxs = nil +} diff --git a/srvlib/protocol/redirect.proto b/srvlib/protocol/redirect.proto new file mode 100644 index 0000000..a67d8f7 --- /dev/null +++ b/srvlib/protocol/redirect.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSPacketRedirect { + int64 ClientSid = 1; + repeated SrvInfo SrvRoutes = 2; + int32 PacketId = 3; + bytes Data = 4; +} + +message SrvInfo { + int32 SArea = 1; + int32 SType = 2; + int32 SId = 3; +} \ No newline at end of file diff --git a/srvlib/protocol/serviceinfo.pb.go b/srvlib/protocol/serviceinfo.pb.go new file mode 100644 index 0000000..090bdfd --- /dev/null +++ b/srvlib/protocol/serviceinfo.pb.go @@ -0,0 +1,642 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: serviceinfo.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ServiceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AreaId int32 `protobuf:"varint,1,opt,name=AreaId,proto3" json:"AreaId,omitempty"` + SrvId int32 `protobuf:"varint,2,opt,name=SrvId,proto3" json:"SrvId,omitempty"` + SrvType int32 `protobuf:"varint,3,opt,name=SrvType,proto3" json:"SrvType,omitempty"` + SrvPID int32 `protobuf:"varint,4,opt,name=SrvPID,proto3" json:"SrvPID,omitempty"` + SrvName string `protobuf:"bytes,5,opt,name=SrvName,proto3" json:"SrvName,omitempty"` + NetworkType string `protobuf:"bytes,6,opt,name=NetworkType,proto3" json:"NetworkType,omitempty"` + Ip string `protobuf:"bytes,7,opt,name=Ip,proto3" json:"Ip,omitempty"` + Port int32 `protobuf:"varint,8,opt,name=Port,proto3" json:"Port,omitempty"` + WriteTimeOut int32 `protobuf:"varint,9,opt,name=WriteTimeOut,proto3" json:"WriteTimeOut,omitempty"` + ReadTimeOut int32 `protobuf:"varint,10,opt,name=ReadTimeOut,proto3" json:"ReadTimeOut,omitempty"` + IdleTimeOut int32 `protobuf:"varint,11,opt,name=IdleTimeOut,proto3" json:"IdleTimeOut,omitempty"` + MaxDone int32 `protobuf:"varint,12,opt,name=MaxDone,proto3" json:"MaxDone,omitempty"` + MaxPend int32 `protobuf:"varint,13,opt,name=MaxPend,proto3" json:"MaxPend,omitempty"` + MaxPacket int32 `protobuf:"varint,14,opt,name=MaxPacket,proto3" json:"MaxPacket,omitempty"` + RcvBuff int32 `protobuf:"varint,15,opt,name=RcvBuff,proto3" json:"RcvBuff,omitempty"` + SndBuff int32 `protobuf:"varint,16,opt,name=SndBuff,proto3" json:"SndBuff,omitempty"` + SoLinger int32 `protobuf:"varint,17,opt,name=SoLinger,proto3" json:"SoLinger,omitempty"` + IsAuth bool `protobuf:"varint,18,opt,name=IsAuth,proto3" json:"IsAuth,omitempty"` + KeepAlive bool `protobuf:"varint,19,opt,name=KeepAlive,proto3" json:"KeepAlive,omitempty"` + NoDelay bool `protobuf:"varint,20,opt,name=NoDelay,proto3" json:"NoDelay,omitempty"` + IsAutoReconn bool 
`protobuf:"varint,21,opt,name=IsAutoReconn,proto3" json:"IsAutoReconn,omitempty"` + IsInnerLink bool `protobuf:"varint,22,opt,name=IsInnerLink,proto3" json:"IsInnerLink,omitempty"` + SupportFragment bool `protobuf:"varint,23,opt,name=SupportFragment,proto3" json:"SupportFragment,omitempty"` + AllowMultiConn bool `protobuf:"varint,24,opt,name=AllowMultiConn,proto3" json:"AllowMultiConn,omitempty"` + AuthKey string `protobuf:"bytes,25,opt,name=AuthKey,proto3" json:"AuthKey,omitempty"` + EncoderName string `protobuf:"bytes,26,opt,name=EncoderName,proto3" json:"EncoderName,omitempty"` + DecoderName string `protobuf:"bytes,27,opt,name=DecoderName,proto3" json:"DecoderName,omitempty"` + FilterChain []string `protobuf:"bytes,28,rep,name=FilterChain,proto3" json:"FilterChain,omitempty"` + HandlerChain []string `protobuf:"bytes,29,rep,name=HandlerChain,proto3" json:"HandlerChain,omitempty"` + Protocol string `protobuf:"bytes,30,opt,name=Protocol,proto3" json:"Protocol,omitempty"` + Path string `protobuf:"bytes,31,opt,name=Path,proto3" json:"Path,omitempty"` + OuterIp string `protobuf:"bytes,32,opt,name=OuterIp,proto3" json:"OuterIp,omitempty"` +} + +func (x *ServiceInfo) Reset() { + *x = ServiceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_serviceinfo_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceInfo) ProtoMessage() {} + +func (x *ServiceInfo) ProtoReflect() protoreflect.Message { + mi := &file_serviceinfo_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead. 
+func (*ServiceInfo) Descriptor() ([]byte, []int) { + return file_serviceinfo_proto_rawDescGZIP(), []int{0} +} + +func (x *ServiceInfo) GetAreaId() int32 { + if x != nil { + return x.AreaId + } + return 0 +} + +func (x *ServiceInfo) GetSrvId() int32 { + if x != nil { + return x.SrvId + } + return 0 +} + +func (x *ServiceInfo) GetSrvType() int32 { + if x != nil { + return x.SrvType + } + return 0 +} + +func (x *ServiceInfo) GetSrvPID() int32 { + if x != nil { + return x.SrvPID + } + return 0 +} + +func (x *ServiceInfo) GetSrvName() string { + if x != nil { + return x.SrvName + } + return "" +} + +func (x *ServiceInfo) GetNetworkType() string { + if x != nil { + return x.NetworkType + } + return "" +} + +func (x *ServiceInfo) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +func (x *ServiceInfo) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *ServiceInfo) GetWriteTimeOut() int32 { + if x != nil { + return x.WriteTimeOut + } + return 0 +} + +func (x *ServiceInfo) GetReadTimeOut() int32 { + if x != nil { + return x.ReadTimeOut + } + return 0 +} + +func (x *ServiceInfo) GetIdleTimeOut() int32 { + if x != nil { + return x.IdleTimeOut + } + return 0 +} + +func (x *ServiceInfo) GetMaxDone() int32 { + if x != nil { + return x.MaxDone + } + return 0 +} + +func (x *ServiceInfo) GetMaxPend() int32 { + if x != nil { + return x.MaxPend + } + return 0 +} + +func (x *ServiceInfo) GetMaxPacket() int32 { + if x != nil { + return x.MaxPacket + } + return 0 +} + +func (x *ServiceInfo) GetRcvBuff() int32 { + if x != nil { + return x.RcvBuff + } + return 0 +} + +func (x *ServiceInfo) GetSndBuff() int32 { + if x != nil { + return x.SndBuff + } + return 0 +} + +func (x *ServiceInfo) GetSoLinger() int32 { + if x != nil { + return x.SoLinger + } + return 0 +} + +func (x *ServiceInfo) GetIsAuth() bool { + if x != nil { + return x.IsAuth + } + return false +} + +func (x *ServiceInfo) GetKeepAlive() bool { + if x != nil { + return x.KeepAlive 
+ } + return false +} + +func (x *ServiceInfo) GetNoDelay() bool { + if x != nil { + return x.NoDelay + } + return false +} + +func (x *ServiceInfo) GetIsAutoReconn() bool { + if x != nil { + return x.IsAutoReconn + } + return false +} + +func (x *ServiceInfo) GetIsInnerLink() bool { + if x != nil { + return x.IsInnerLink + } + return false +} + +func (x *ServiceInfo) GetSupportFragment() bool { + if x != nil { + return x.SupportFragment + } + return false +} + +func (x *ServiceInfo) GetAllowMultiConn() bool { + if x != nil { + return x.AllowMultiConn + } + return false +} + +func (x *ServiceInfo) GetAuthKey() string { + if x != nil { + return x.AuthKey + } + return "" +} + +func (x *ServiceInfo) GetEncoderName() string { + if x != nil { + return x.EncoderName + } + return "" +} + +func (x *ServiceInfo) GetDecoderName() string { + if x != nil { + return x.DecoderName + } + return "" +} + +func (x *ServiceInfo) GetFilterChain() []string { + if x != nil { + return x.FilterChain + } + return nil +} + +func (x *ServiceInfo) GetHandlerChain() []string { + if x != nil { + return x.HandlerChain + } + return nil +} + +func (x *ServiceInfo) GetProtocol() string { + if x != nil { + return x.Protocol + } + return "" +} + +func (x *ServiceInfo) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *ServiceInfo) GetOuterIp() string { + if x != nil { + return x.OuterIp + } + return "" +} + +type SSServiceRegiste struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Services []*ServiceInfo `protobuf:"bytes,1,rep,name=Services,proto3" json:"Services,omitempty"` +} + +func (x *SSServiceRegiste) Reset() { + *x = SSServiceRegiste{} + if protoimpl.UnsafeEnabled { + mi := &file_serviceinfo_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSServiceRegiste) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*SSServiceRegiste) ProtoMessage() {} + +func (x *SSServiceRegiste) ProtoReflect() protoreflect.Message { + mi := &file_serviceinfo_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSServiceRegiste.ProtoReflect.Descriptor instead. +func (*SSServiceRegiste) Descriptor() ([]byte, []int) { + return file_serviceinfo_proto_rawDescGZIP(), []int{1} +} + +func (x *SSServiceRegiste) GetServices() []*ServiceInfo { + if x != nil { + return x.Services + } + return nil +} + +type SSServiceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service *ServiceInfo `protobuf:"bytes,1,opt,name=Service,proto3" json:"Service,omitempty"` +} + +func (x *SSServiceInfo) Reset() { + *x = SSServiceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_serviceinfo_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSServiceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSServiceInfo) ProtoMessage() {} + +func (x *SSServiceInfo) ProtoReflect() protoreflect.Message { + mi := &file_serviceinfo_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSServiceInfo.ProtoReflect.Descriptor instead. 
+func (*SSServiceInfo) Descriptor() ([]byte, []int) { + return file_serviceinfo_proto_rawDescGZIP(), []int{2} +} + +func (x *SSServiceInfo) GetService() *ServiceInfo { + if x != nil { + return x.Service + } + return nil +} + +type SSServiceShut struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service *ServiceInfo `protobuf:"bytes,1,opt,name=Service,proto3" json:"Service,omitempty"` +} + +func (x *SSServiceShut) Reset() { + *x = SSServiceShut{} + if protoimpl.UnsafeEnabled { + mi := &file_serviceinfo_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSServiceShut) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSServiceShut) ProtoMessage() {} + +func (x *SSServiceShut) ProtoReflect() protoreflect.Message { + mi := &file_serviceinfo_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSServiceShut.ProtoReflect.Descriptor instead. 
+func (*SSServiceShut) Descriptor() ([]byte, []int) { + return file_serviceinfo_proto_rawDescGZIP(), []int{3} +} + +func (x *SSServiceShut) GetService() *ServiceInfo { + if x != nil { + return x.Service + } + return nil +} + +var File_serviceinfo_proto protoreflect.FileDescriptor + +var file_serviceinfo_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xad, 0x07, + 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, + 0x06, 0x41, 0x72, 0x65, 0x61, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x41, + 0x72, 0x65, 0x61, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x53, 0x72, 0x76, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x53, + 0x72, 0x76, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x53, 0x72, + 0x76, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x72, 0x76, 0x50, 0x49, 0x44, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x53, 0x72, 0x76, 0x50, 0x49, 0x44, 0x12, 0x18, 0x0a, + 0x07, 0x53, 0x72, 0x76, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x53, 0x72, 0x76, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x70, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x22, 0x0a, + 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x75, + 
0x74, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, + 0x4f, 0x75, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4f, + 0x75, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x61, 0x78, 0x44, 0x6f, 0x6e, 0x65, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x4d, 0x61, 0x78, 0x44, 0x6f, 0x6e, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x4d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x07, 0x4d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x4d, 0x61, 0x78, + 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x4d, 0x61, + 0x78, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x63, 0x76, 0x42, 0x75, + 0x66, 0x66, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x52, 0x63, 0x76, 0x42, 0x75, 0x66, + 0x66, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x6e, 0x64, 0x42, 0x75, 0x66, 0x66, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x07, 0x53, 0x6e, 0x64, 0x42, 0x75, 0x66, 0x66, 0x12, 0x1a, 0x0a, 0x08, 0x53, + 0x6f, 0x4c, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x53, + 0x6f, 0x4c, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x41, 0x75, 0x74, + 0x68, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x49, 0x73, 0x41, 0x75, 0x74, 0x68, 0x12, + 0x1c, 0x0a, 0x09, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x4e, 0x6f, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x4e, 0x6f, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x49, 0x73, 0x41, 0x75, 0x74, + 0x6f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 
0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x49, + 0x73, 0x41, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x49, + 0x73, 0x49, 0x6e, 0x6e, 0x65, 0x72, 0x4c, 0x69, 0x6e, 0x6b, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x49, 0x73, 0x49, 0x6e, 0x6e, 0x65, 0x72, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x28, 0x0a, + 0x0f, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x46, + 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x77, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6f, 0x6e, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6f, 0x6e, 0x6e, 0x12, + 0x18, 0x0a, 0x07, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x44, + 0x65, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x1c, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, + 0x22, 0x0a, 0x0c, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x1e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 
0x63, 0x6f, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x49, 0x70, 0x18, 0x20, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x49, 0x70, 0x22, 0x45, 0x0a, + 0x10, 0x53, 0x53, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x12, 0x31, 0x0a, 0x08, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x22, 0x40, 0x0a, 0x0d, 0x53, 0x53, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x40, 0x0a, 0x0d, 0x53, 0x53, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x53, 0x68, 0x75, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_serviceinfo_proto_rawDescOnce sync.Once + file_serviceinfo_proto_rawDescData = file_serviceinfo_proto_rawDesc +) + +func file_serviceinfo_proto_rawDescGZIP() []byte { + file_serviceinfo_proto_rawDescOnce.Do(func() { + file_serviceinfo_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_serviceinfo_proto_rawDescData) + }) + return file_serviceinfo_proto_rawDescData +} + +var file_serviceinfo_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_serviceinfo_proto_goTypes = []interface{}{ + (*ServiceInfo)(nil), // 0: protocol.ServiceInfo + (*SSServiceRegiste)(nil), // 1: protocol.SSServiceRegiste + (*SSServiceInfo)(nil), // 2: protocol.SSServiceInfo + (*SSServiceShut)(nil), // 3: protocol.SSServiceShut +} +var file_serviceinfo_proto_depIdxs = []int32{ + 0, // 0: protocol.SSServiceRegiste.Services:type_name -> protocol.ServiceInfo + 0, // 1: protocol.SSServiceInfo.Service:type_name -> protocol.ServiceInfo + 0, // 2: protocol.SSServiceShut.Service:type_name -> protocol.ServiceInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_serviceinfo_proto_init() } +func file_serviceinfo_proto_init() { + if File_serviceinfo_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_serviceinfo_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_serviceinfo_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSServiceRegiste); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_serviceinfo_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSServiceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_serviceinfo_proto_msgTypes[3].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*SSServiceShut); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_serviceinfo_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_serviceinfo_proto_goTypes, + DependencyIndexes: file_serviceinfo_proto_depIdxs, + MessageInfos: file_serviceinfo_proto_msgTypes, + }.Build() + File_serviceinfo_proto = out.File + file_serviceinfo_proto_rawDesc = nil + file_serviceinfo_proto_goTypes = nil + file_serviceinfo_proto_depIdxs = nil +} diff --git a/srvlib/protocol/serviceinfo.proto b/srvlib/protocol/serviceinfo.proto new file mode 100644 index 0000000..deb604e --- /dev/null +++ b/srvlib/protocol/serviceinfo.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message ServiceInfo { + int32 AreaId = 1; + int32 SrvId = 2; + int32 SrvType = 3; + int32 SrvPID = 4; + string SrvName = 5; + string NetworkType = 6; + string Ip = 7; + int32 Port = 8; + int32 WriteTimeOut = 9; + int32 ReadTimeOut = 10; + int32 IdleTimeOut = 11; + int32 MaxDone = 12; + int32 MaxPend = 13; + int32 MaxPacket = 14; + int32 RcvBuff = 15; + int32 SndBuff = 16; + int32 SoLinger = 17; + bool IsAuth = 18; + bool KeepAlive = 19; + bool NoDelay = 20; + bool IsAutoReconn = 21; + bool IsInnerLink = 22; + bool SupportFragment = 23; + bool AllowMultiConn = 24; + string AuthKey = 25; + string EncoderName = 26; + string DecoderName = 27; + repeated string FilterChain = 28; + repeated string HandlerChain = 29; + string Protocol = 30; + string Path = 31; + string OuterIp = 32; +} + +message SSServiceRegiste { + repeated ServiceInfo Services = 1; +} + +message SSServiceInfo { + ServiceInfo Service = 1; +} + +message SSServiceShut { + 
ServiceInfo Service = 1; +} \ No newline at end of file diff --git a/srvlib/protocol/srvlibpacketid.pb.go b/srvlib/protocol/srvlibpacketid.pb.go new file mode 100644 index 0000000..3f69ff4 --- /dev/null +++ b/srvlib/protocol/srvlibpacketid.pb.go @@ -0,0 +1,166 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: srvlibpacketid.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SrvlibPacketID int32 + +const ( + SrvlibPacketID_PACKET_SRVLIB_UNKNOW SrvlibPacketID = 0 + SrvlibPacketID_PACKET_SS_REGISTE SrvlibPacketID = -2000 + SrvlibPacketID_PACKET_SS_MULTICAST SrvlibPacketID = -2001 + SrvlibPacketID_PACKET_SS_BROADCAST SrvlibPacketID = -2002 + SrvlibPacketID_PACKET_SS_TRANSIT SrvlibPacketID = -2003 + SrvlibPacketID_PACKET_SS_REDIRECT SrvlibPacketID = -2004 + SrvlibPacketID_PACKET_SS_SERVICE_REGISTE SrvlibPacketID = -2005 + SrvlibPacketID_PACKET_SS_SERVICE_INFO SrvlibPacketID = -2006 + SrvlibPacketID_PACKET_SS_SERVICE_SHUT SrvlibPacketID = -2007 +) + +// Enum value maps for SrvlibPacketID. 
+var ( + SrvlibPacketID_name = map[int32]string{ + 0: "PACKET_SRVLIB_UNKNOW", + -2000: "PACKET_SS_REGISTE", + -2001: "PACKET_SS_MULTICAST", + -2002: "PACKET_SS_BROADCAST", + -2003: "PACKET_SS_TRANSIT", + -2004: "PACKET_SS_REDIRECT", + -2005: "PACKET_SS_SERVICE_REGISTE", + -2006: "PACKET_SS_SERVICE_INFO", + -2007: "PACKET_SS_SERVICE_SHUT", + } + SrvlibPacketID_value = map[string]int32{ + "PACKET_SRVLIB_UNKNOW": 0, + "PACKET_SS_REGISTE": -2000, + "PACKET_SS_MULTICAST": -2001, + "PACKET_SS_BROADCAST": -2002, + "PACKET_SS_TRANSIT": -2003, + "PACKET_SS_REDIRECT": -2004, + "PACKET_SS_SERVICE_REGISTE": -2005, + "PACKET_SS_SERVICE_INFO": -2006, + "PACKET_SS_SERVICE_SHUT": -2007, + } +) + +func (x SrvlibPacketID) Enum() *SrvlibPacketID { + p := new(SrvlibPacketID) + *p = x + return p +} + +func (x SrvlibPacketID) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SrvlibPacketID) Descriptor() protoreflect.EnumDescriptor { + return file_srvlibpacketid_proto_enumTypes[0].Descriptor() +} + +func (SrvlibPacketID) Type() protoreflect.EnumType { + return &file_srvlibpacketid_proto_enumTypes[0] +} + +func (x SrvlibPacketID) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SrvlibPacketID.Descriptor instead. 
+func (SrvlibPacketID) EnumDescriptor() ([]byte, []int) { + return file_srvlibpacketid_proto_rawDescGZIP(), []int{0} +} + +var File_srvlibpacketid_proto protoreflect.FileDescriptor + +var file_srvlibpacketid_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x73, 0x72, 0x76, 0x6c, 0x69, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x69, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x2a, 0xc1, 0x02, 0x0a, 0x0e, 0x53, 0x72, 0x76, 0x6c, 0x69, 0x62, 0x50, 0x61, 0x63, 0x6b, 0x65, + 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x52, + 0x56, 0x4c, 0x49, 0x42, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x1e, 0x0a, + 0x11, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x53, + 0x54, 0x45, 0x10, 0xb0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x20, 0x0a, + 0x13, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, + 0x43, 0x41, 0x53, 0x54, 0x10, 0xaf, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, + 0x20, 0x0a, 0x13, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x42, 0x52, 0x4f, + 0x41, 0x44, 0x43, 0x41, 0x53, 0x54, 0x10, 0xae, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x12, 0x1e, 0x0a, 0x11, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x54, + 0x52, 0x41, 0x4e, 0x53, 0x49, 0x54, 0x10, 0xad, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x01, 0x12, 0x1f, 0x0a, 0x12, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x52, + 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0xac, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x01, 0x12, 0x26, 0x0a, 0x19, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x10, + 0xab, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, 0x23, 0x0a, 0x16, 0x50, 0x41, + 
0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, + 0x49, 0x4e, 0x46, 0x4f, 0x10, 0xaa, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x12, + 0x23, 0x0a, 0x16, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x53, 0x53, 0x5f, 0x53, 0x45, 0x52, + 0x56, 0x49, 0x43, 0x45, 0x5f, 0x53, 0x48, 0x55, 0x54, 0x10, 0xa9, 0xf0, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x01, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_srvlibpacketid_proto_rawDescOnce sync.Once + file_srvlibpacketid_proto_rawDescData = file_srvlibpacketid_proto_rawDesc +) + +func file_srvlibpacketid_proto_rawDescGZIP() []byte { + file_srvlibpacketid_proto_rawDescOnce.Do(func() { + file_srvlibpacketid_proto_rawDescData = protoimpl.X.CompressGZIP(file_srvlibpacketid_proto_rawDescData) + }) + return file_srvlibpacketid_proto_rawDescData +} + +var file_srvlibpacketid_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_srvlibpacketid_proto_goTypes = []interface{}{ + (SrvlibPacketID)(0), // 0: protocol.SrvlibPacketID +} +var file_srvlibpacketid_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_srvlibpacketid_proto_init() } +func file_srvlibpacketid_proto_init() { + if File_srvlibpacketid_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_srvlibpacketid_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_srvlibpacketid_proto_goTypes, + DependencyIndexes: file_srvlibpacketid_proto_depIdxs, + EnumInfos: 
file_srvlibpacketid_proto_enumTypes, + }.Build() + File_srvlibpacketid_proto = out.File + file_srvlibpacketid_proto_rawDesc = nil + file_srvlibpacketid_proto_goTypes = nil + file_srvlibpacketid_proto_depIdxs = nil +} diff --git a/srvlib/protocol/srvlibpacketid.proto b/srvlib/protocol/srvlibpacketid.proto new file mode 100644 index 0000000..c3716d6 --- /dev/null +++ b/srvlib/protocol/srvlibpacketid.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +enum SrvlibPacketID { + PACKET_SRVLIB_UNKNOW= 0; + PACKET_SS_REGISTE = -2000; + PACKET_SS_MULTICAST = -2001; + PACKET_SS_BROADCAST = -2002; // 消息广播;给一个 + PACKET_SS_TRANSIT = -2003; + PACKET_SS_REDIRECT = -2004; + PACKET_SS_SERVICE_REGISTE = -2005; + PACKET_SS_SERVICE_INFO = -2006; + PACKET_SS_SERVICE_SHUT = -2007; +} \ No newline at end of file diff --git a/srvlib/protocol/srvregiste.pb.go b/srvlib/protocol/srvregiste.pb.go new file mode 100644 index 0000000..c478792 --- /dev/null +++ b/srvlib/protocol/srvregiste.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: srvregiste.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SSSrvRegiste struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"` + Type int32 `protobuf:"varint,2,opt,name=Type,proto3" json:"Type,omitempty"` + AreaId int32 `protobuf:"varint,3,opt,name=AreaId,proto3" json:"AreaId,omitempty"` + Name string `protobuf:"bytes,4,opt,name=Name,proto3" json:"Name,omitempty"` + Data string `protobuf:"bytes,5,opt,name=Data,proto3" json:"Data,omitempty"` +} + +func (x *SSSrvRegiste) Reset() { + *x = SSSrvRegiste{} + if protoimpl.UnsafeEnabled { + mi := &file_srvregiste_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSSrvRegiste) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSSrvRegiste) ProtoMessage() {} + +func (x *SSSrvRegiste) ProtoReflect() protoreflect.Message { + mi := &file_srvregiste_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SSSrvRegiste.ProtoReflect.Descriptor instead. 
+func (*SSSrvRegiste) Descriptor() ([]byte, []int) { + return file_srvregiste_proto_rawDescGZIP(), []int{0} +} + +func (x *SSSrvRegiste) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *SSSrvRegiste) GetType() int32 { + if x != nil { + return x.Type + } + return 0 +} + +func (x *SSSrvRegiste) GetAreaId() int32 { + if x != nil { + return x.AreaId + } + return 0 +} + +func (x *SSSrvRegiste) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SSSrvRegiste) GetData() string { + if x != nil { + return x.Data + } + return "" +} + +var File_srvregiste_proto protoreflect.FileDescriptor + +var file_srvregiste_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x73, 0x72, 0x76, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x72, 0x0a, 0x0c, + 0x53, 0x53, 0x53, 0x72, 0x76, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x41, 0x72, 0x65, 0x61, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x41, 0x72, 0x65, 0x61, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, + 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_srvregiste_proto_rawDescOnce sync.Once + file_srvregiste_proto_rawDescData = file_srvregiste_proto_rawDesc +) + +func file_srvregiste_proto_rawDescGZIP() []byte { + file_srvregiste_proto_rawDescOnce.Do(func() { + file_srvregiste_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_srvregiste_proto_rawDescData) + }) + return file_srvregiste_proto_rawDescData +} + +var file_srvregiste_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_srvregiste_proto_goTypes = []interface{}{ + (*SSSrvRegiste)(nil), // 0: protocol.SSSrvRegiste +} +var file_srvregiste_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_srvregiste_proto_init() } +func file_srvregiste_proto_init() { + if File_srvregiste_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_srvregiste_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSSrvRegiste); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_srvregiste_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_srvregiste_proto_goTypes, + DependencyIndexes: file_srvregiste_proto_depIdxs, + MessageInfos: file_srvregiste_proto_msgTypes, + }.Build() + File_srvregiste_proto = out.File + file_srvregiste_proto_rawDesc = nil + file_srvregiste_proto_goTypes = nil + file_srvregiste_proto_depIdxs = nil +} diff --git a/srvlib/protocol/srvregiste.proto b/srvlib/protocol/srvregiste.proto new file mode 100644 index 0000000..3d652a2 --- /dev/null +++ b/srvlib/protocol/srvregiste.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSSrvRegiste { + int32 Id = 1; + int32 Type = 2; + int32 AreaId = 3; + string Name = 4; + string Data = 5; +} \ No newline at end of 
file diff --git a/srvlib/protocol/transit.pb.go b/srvlib/protocol/transit.pb.go new file mode 100644 index 0000000..897f942 --- /dev/null +++ b/srvlib/protocol/transit.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.5.1 +// source: transit.proto + +package protocol + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SSPacketTransit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SArea int32 `protobuf:"varint,1,opt,name=SArea,proto3" json:"SArea,omitempty"` + SType int32 `protobuf:"varint,2,opt,name=SType,proto3" json:"SType,omitempty"` + SId int32 `protobuf:"varint,3,opt,name=SId,proto3" json:"SId,omitempty"` + PacketId int32 `protobuf:"varint,4,opt,name=PacketId,proto3" json:"PacketId,omitempty"` + Data []byte `protobuf:"bytes,5,opt,name=Data,proto3" json:"Data,omitempty"` +} + +func (x *SSPacketTransit) Reset() { + *x = SSPacketTransit{} + if protoimpl.UnsafeEnabled { + mi := &file_transit_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SSPacketTransit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SSPacketTransit) ProtoMessage() {} + +func (x *SSPacketTransit) ProtoReflect() protoreflect.Message { + mi := &file_transit_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use SSPacketTransit.ProtoReflect.Descriptor instead. +func (*SSPacketTransit) Descriptor() ([]byte, []int) { + return file_transit_proto_rawDescGZIP(), []int{0} +} + +func (x *SSPacketTransit) GetSArea() int32 { + if x != nil { + return x.SArea + } + return 0 +} + +func (x *SSPacketTransit) GetSType() int32 { + if x != nil { + return x.SType + } + return 0 +} + +func (x *SSPacketTransit) GetSId() int32 { + if x != nil { + return x.SId + } + return 0 +} + +func (x *SSPacketTransit) GetPacketId() int32 { + if x != nil { + return x.PacketId + } + return 0 +} + +func (x *SSPacketTransit) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +var File_transit_proto protoreflect.FileDescriptor + +var file_transit_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x7f, 0x0a, 0x0f, 0x53, 0x53, 0x50, + 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x53, 0x41, 0x72, 0x65, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x53, 0x41, 0x72, + 0x65, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x49, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x53, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x61, + 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x50, 0x61, + 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_transit_proto_rawDescOnce sync.Once + file_transit_proto_rawDescData = file_transit_proto_rawDesc +) + 
+func file_transit_proto_rawDescGZIP() []byte { + file_transit_proto_rawDescOnce.Do(func() { + file_transit_proto_rawDescData = protoimpl.X.CompressGZIP(file_transit_proto_rawDescData) + }) + return file_transit_proto_rawDescData +} + +var file_transit_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_transit_proto_goTypes = []interface{}{ + (*SSPacketTransit)(nil), // 0: protocol.SSPacketTransit +} +var file_transit_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_transit_proto_init() } +func file_transit_proto_init() { + if File_transit_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_transit_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SSPacketTransit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_transit_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_transit_proto_goTypes, + DependencyIndexes: file_transit_proto_depIdxs, + MessageInfos: file_transit_proto_msgTypes, + }.Build() + File_transit_proto = out.File + file_transit_proto_rawDesc = nil + file_transit_proto_goTypes = nil + file_transit_proto_depIdxs = nil +} diff --git a/srvlib/protocol/transit.proto b/srvlib/protocol/transit.proto new file mode 100644 index 0000000..b2bd191 --- /dev/null +++ b/srvlib/protocol/transit.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package protocol; +option go_package = ".;protocol"; + +message SSPacketTransit { + int32 SArea = 1; + int32 SType = 2; + 
int32 SId = 3; + int32 PacketId = 4; + bytes Data = 5; +} \ No newline at end of file diff --git a/srvlib/serversessionmgr.go b/srvlib/serversessionmgr.go new file mode 100644 index 0000000..94e1418 --- /dev/null +++ b/srvlib/serversessionmgr.go @@ -0,0 +1,199 @@ +package srvlib + +import ( + "strings" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +/* + 服务信息注册 + 服务信息->session +*/ + +var ( + SessionAttributeServerInfo = &ServerSessionMgr{} + ServerSessionMgrSington = &ServerSessionMgr{sessions: make(map[int]map[int]map[int]*netlib.Session)} +) + +type ServerSessionRegisteListener interface { + OnRegiste(*netlib.Session) + OnUnregiste(*netlib.Session) +} + +type ServerSessionMgr struct { + sessions map[int]map[int]map[int]*netlib.Session //keys=>areaid:type:id + listeners []ServerSessionRegisteListener +} + +func (ssm *ServerSessionMgr) AddListener(l ServerSessionRegisteListener) ServerSessionRegisteListener { + ssm.listeners = append(ssm.listeners, l) + return l +} + +func (ssm *ServerSessionMgr) RegisteSession(s *netlib.Session) bool { + attr := s.GetAttribute(SessionAttributeServerInfo) + if attr != nil { + if srvInfo, ok := attr.(*protocol.SSSrvRegiste); ok && srvInfo != nil { + areaId := int(srvInfo.GetAreaId()) + srvType := int(srvInfo.GetType()) + srvId := int(srvInfo.GetId()) + if a, exist := ssm.sessions[areaId]; !exist { + ssm.sessions[areaId] = make(map[int]map[int]*netlib.Session) + a = ssm.sessions[areaId] + a[srvType] = make(map[int]*netlib.Session) + } else { + if _, exist := a[srvType]; !exist { + a[srvType] = make(map[int]*netlib.Session) + } + } + + if _, exist := ssm.sessions[areaId][srvType][srvId]; !exist { + logger.Logger.Infof("(ssm *ServerSessionMgr) RegisteSession %v", srvInfo) + ssm.sessions[areaId][srvType][srvId] = s + if len(ssm.listeners) != 0 { + for _, l := range ssm.listeners { + l.OnRegiste(s) + } + } + } else { + logger.Logger.Warnf("###(ssm 
*ServerSessionMgr) RegisteSession repeated areaid:%v srvType:%v srvId:%v", areaId, srvType, srvId) + } + } + } else { + logger.Logger.Warnf("ServerSessionMgr.RegisteSession SessionAttributeServerInfo=nil") + } + return true +} + +func (ssm *ServerSessionMgr) UnregisteSession(s *netlib.Session) bool { + attr := s.GetAttribute(SessionAttributeServerInfo) + if attr != nil { + if srvInfo, ok := attr.(*protocol.SSSrvRegiste); ok && srvInfo != nil { + logger.Logger.Infof("ServerSessionMgr.UnregisteSession try %v", srvInfo) + areaId := int(srvInfo.GetAreaId()) + srvType := int(srvInfo.GetType()) + srvId := int(srvInfo.GetId()) + if a, exist := ssm.sessions[areaId]; exist { + if b, exist := a[srvType]; exist { + if _, exist := b[srvId]; exist { + logger.Logger.Infof("ServerSessionMgr.UnregisteSession %v success", srvInfo) + delete(b, srvId) + if len(ssm.listeners) != 0 { + for _, l := range ssm.listeners { + l.OnUnregiste(s) + } + } + } else { + logger.Logger.Warnf("(ssm *ServerSessionMgr) UnregisteSession found not fit session, area:%v type:%v id:%v", areaId, srvType, srvId) + } + } + } + } + } + return true +} + +func (ssm *ServerSessionMgr) GetSession(areaId, srvType, srvId int) *netlib.Session { + if a, exist := ssm.sessions[areaId]; exist { + if b, exist := a[srvType]; exist { + if c, exist := b[srvId]; exist { + return c + } + } + } + return nil +} + +func (ssm *ServerSessionMgr) GetSessions(areaId, srvType int) (sessions []*netlib.Session) { + if a, exist := ssm.sessions[areaId]; exist { + if b, exist := a[srvType]; exist { + for _, s := range b { + sessions = append(sessions, s) + } + } + } + return +} + +func (ssm *ServerSessionMgr) GetServerId(areaId, srvType int) int { + if a, exist := ssm.sessions[areaId]; exist { + if b, exist := a[srvType]; exist { + for sid, _ := range b { + return sid + } + } + } + return -1 +} + +func (ssm *ServerSessionMgr) GetServerIdByMaxData(areaId, srvType int) int { + var bestSid int = -1 + var data string + if a, exist := 
ssm.sessions[areaId]; exist { + if b, exist := a[srvType]; exist { + for sid, s := range b { + if srvInfo, ok := s.GetAttribute(SessionAttributeServerInfo).(*protocol.SSSrvRegiste); ok && srvInfo != nil { + if strings.Compare(data, srvInfo.GetData()) <= 0 { + data = srvInfo.GetData() + bestSid = sid + } + } + } + } + } + return bestSid +} + +func (ssm *ServerSessionMgr) GetServerIds(areaId, srvType int) (ids []int) { + if a, exist := ssm.sessions[areaId]; exist { + if b, exist := a[srvType]; exist { + for sid, _ := range b { + ids = append(ids, sid) + } + } + } + return +} + +func (ssm *ServerSessionMgr) Broadcast(packetid int, pack interface{}, areaId, srvType int) { + if areaId >= 0 { + if srvType >= 0 { + if a, exist := ssm.sessions[areaId]; exist { + if b, exist := a[srvType]; exist { + for _, s := range b { + s.Send(packetid, pack) + } + } + } + } else { + if a, exist := ssm.sessions[areaId]; exist { + for _, b := range a { + for _, s := range b { + s.Send(packetid, pack) + } + } + } + } + } else { + if srvType >= 0 { + for _, a := range ssm.sessions { + if b, exist := a[srvType]; exist { + for _, s := range b { + s.Send(packetid, pack) + } + } + } + } else { + for _, a := range ssm.sessions { + for _, b := range a { + for _, s := range b { + s.Send(packetid, pack) + } + } + } + } + } +} diff --git a/srvlib/servicemgr.go b/srvlib/servicemgr.go new file mode 100644 index 0000000..59b7477 --- /dev/null +++ b/srvlib/servicemgr.go @@ -0,0 +1,303 @@ +package srvlib + +import ( + "os" + "strconv" + "strings" + "time" + + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/srvlib/protocol" +) + +var ( + SessionAttributeServiceInfo = &serviceMgr{} + SessionAttributeServiceFlag = &serviceMgr{} + ServiceMgr = &serviceMgr{servicesPool: make(map[int32]map[int32]*protocol.ServiceInfo)} +) + +type ServiceRegisteListener interface { + OnRegiste([]*protocol.ServiceInfo) + OnUnregiste(*protocol.ServiceInfo) +} + +type 
serviceMgr struct { + servicesPool map[int32]map[int32]*protocol.ServiceInfo // srvType:srvId:ServiceInfo + listeners []ServiceRegisteListener +} + +func (this *serviceMgr) AddListener(l ServiceRegisteListener) ServiceRegisteListener { + this.listeners = append(this.listeners, l) + return l +} + +func (this *serviceMgr) RegisteService(s *netlib.Session, services []*protocol.ServiceInfo) { + if this == nil || s == nil || len(services) == 0 { + return + } + + // 根据对方提供的服务信息获取对方应该和那些其它服务建立连接,然后让其它服务连接这个刚注册的服务(service) + s.SetAttribute(SessionAttributeServiceInfo, services) // 保存监听服务配置 + for _, service := range services { + srvid := service.GetSrvId() + srvtype := service.GetSrvType() + if _, has := this.servicesPool[srvtype]; !has { + this.servicesPool[srvtype] = make(map[int32]*protocol.ServiceInfo) + } + if _, exist := this.servicesPool[srvtype][srvid]; !exist { + this.servicesPool[srvtype][srvid] = service + logger.Logger.Info("(this *serviceMgr) RegisteService: ", service.GetSrvName(), " Ip=", service.GetIp(), " Port=", service.GetPort()) + pack := &protocol.SSServiceInfo{} + pack.Service = service + sessiontypes := GetCareSessionsByService(service.GetSrvType()) + areaId := service.GetAreaId() + for _, v1 := range sessiontypes { + // 地区和服务类型 + ServerSessionMgrSington.Broadcast(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_INFO), pack, int(areaId), int(v1)) + } + + if len(this.listeners) != 0 { + for _, l := range this.listeners { + l.OnRegiste(services) + } + } + } + } +} + +func (this *serviceMgr) UnregisteService(service *protocol.ServiceInfo) { + if this == nil || service == nil { + return + } + + srvid := service.GetSrvId() + srvtype := service.GetSrvType() + if v, has := this.servicesPool[srvtype]; has { + if ss, exist := v[srvid]; exist && ss == service { + delete(v, srvid) + logger.Logger.Info("(this *serviceMgr) UnregisteService: ", srvid) + pack := &protocol.SSServiceShut{} + pack.Service = service + sessiontypes := 
GetCareSessionsByService(service.GetSrvType()) + areaId := service.GetAreaId() + for _, v1 := range sessiontypes { + ServerSessionMgrSington.Broadcast(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_SHUT), pack, int(areaId), int(v1)) + } + if len(this.listeners) != 0 { + for _, l := range this.listeners { + l.OnUnregiste(service) + } + } + } + } + +} + +func (this *serviceMgr) OnRegiste(s *netlib.Session) { + if this == nil || s == nil { + return + } + + if s.GetAttribute(SessionAttributeServiceFlag) == nil { + return + } + // 根据对方的服务信息找需要建立连接的其它服务,然后发送给对方,对方主动和其它服务建立连接 + attr := s.GetAttribute(SessionAttributeServerInfo) + if attr != nil { + if srvInfo, ok := attr.(*protocol.SSSrvRegiste); ok && srvInfo != nil { + services := GetCareServicesBySession(srvInfo.GetType()) + for _, v1 := range services { + if v2, has := this.servicesPool[v1]; has { + for _, v3 := range v2 { + func(si *protocol.ServiceInfo, sInfo *protocol.SSSrvRegiste) { + pack := &protocol.SSServiceInfo{} + pack.Service = si + logger.Logger.Info("serviceMgr.OnRegiste Server Type=", sInfo.GetType(), " Id=", sInfo.GetId(), " Name=", sInfo.GetName(), " careful => Service=", si) + s.Send(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_INFO), pack) + }(v3, srvInfo) + } + } + } + } + } +} + +func (this *serviceMgr) OnUnregiste(s *netlib.Session) { + +} + +func (this *serviceMgr) ClearServiceBySession(s *netlib.Session) { + attr := s.GetAttribute(SessionAttributeServiceInfo) + if attr != nil { + if services, ok := attr.([]*protocol.ServiceInfo); ok { + for _, service := range services { + this.UnregisteService(service) + } + } + s.RemoveAttribute(SessionAttributeServiceInfo) + } +} + +func (this *serviceMgr) ReportService(s *netlib.Session) { + acceptors := netlib.GetAcceptors() + cnt := len(acceptors) + if cnt > 0 { + pack := &protocol.SSServiceRegiste{ + Services: make([]*protocol.ServiceInfo, 0, cnt), + } + for _, v := range acceptors { + addr := v.Addr() + if addr == nil { + continue + } + network := 
addr.Network() + s := addr.String() + ipAndPort := strings.Split(s, ":") + if len(ipAndPort) < 2 { + continue + } + + port, err := strconv.Atoi(ipAndPort[len(ipAndPort)-1]) + if err != nil { + continue + } + + sc := v.GetSessionConfig() + si := &protocol.ServiceInfo{ + AreaId: int32(sc.AreaId), + SrvId: int32(sc.Id), + SrvType: int32(sc.Type), + SrvPID: int32(os.Getpid()), + SrvName: sc.Name, + NetworkType: network, + Ip: sc.Ip, + Port: int32(port), + WriteTimeOut: int32(sc.WriteTimeout / time.Second), + ReadTimeOut: int32(sc.ReadTimeout / time.Second), + IdleTimeOut: int32(sc.IdleTimeout / time.Second), + MaxDone: int32(sc.MaxDone), + MaxPend: int32(sc.MaxPend), + MaxPacket: int32(sc.MaxPacket), + RcvBuff: int32(sc.RcvBuff), + SndBuff: int32(sc.SndBuff), + SoLinger: int32(sc.SoLinger), + KeepAlive: sc.KeepAlive, + NoDelay: sc.NoDelay, + IsAutoReconn: sc.IsAutoReconn, + IsInnerLink: sc.IsInnerLink, + SupportFragment: sc.SupportFragment, + AllowMultiConn: sc.AllowMultiConn, + AuthKey: sc.AuthKey, + EncoderName: sc.EncoderName, + DecoderName: sc.DecoderName, + FilterChain: sc.FilterChain, + HandlerChain: sc.HandlerChain, + Protocol: sc.Protocol, + Path: sc.Path, + OuterIp: sc.OuterIp, + } + pack.Services = append(pack.Services, si) + } + s.Send(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_REGISTE), pack) + } +} + +func (this *serviceMgr) GetServices(srvtype int32) map[int32]*protocol.ServiceInfo { + if v, has := this.servicesPool[srvtype]; has { + return v + } + return nil +} + +func (this *serviceMgr) GetService(srvtype, srvid int32) *protocol.ServiceInfo { + if v, has := this.servicesPool[srvtype]; has { + if vv, has := v[srvid]; has { + return vv + } + } + return nil +} + +func init() { + // service registe + netlib.RegisterFactory(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_REGISTE), netlib.PacketFactoryWrapper(func() interface{} { + return &protocol.SSServiceRegiste{} + })) + netlib.RegisterHandler(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_REGISTE), 
netlib.HandlerWrapper(func(s *netlib.Session, packetid int, pack interface{}) error { + if sr, ok := pack.(*protocol.SSServiceRegiste); ok { + ServiceMgr.RegisteService(s, sr.GetServices()) + } + return nil + })) + + // service info + netlib.RegisterFactory(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_INFO), netlib.PacketFactoryWrapper(func() interface{} { + return &protocol.SSServiceInfo{} + })) + netlib.RegisterHandler(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_INFO), netlib.HandlerWrapper(func(s *netlib.Session, packetid int, pack interface{}) error { + if sr, ok := pack.(*protocol.SSServiceInfo); ok { + service := sr.GetService() + if service != nil { + sc := &netlib.SessionConfig{ + Id: int(service.GetSrvId()), + Type: int(service.GetSrvType()), + AreaId: int(service.GetAreaId()), + Name: service.GetSrvName(), + Ip: service.GetIp(), + OuterIp: service.GetOuterIp(), + Port: int(service.GetPort()), + WriteTimeout: time.Duration(service.GetWriteTimeOut()), + ReadTimeout: time.Duration(service.GetReadTimeOut()), + IdleTimeout: time.Duration(service.GetIdleTimeOut()), + MaxDone: int(service.GetMaxDone()), + MaxPend: int(service.GetMaxPend()), + MaxPacket: int(service.GetMaxPacket()), + RcvBuff: int(service.GetRcvBuff()), + SndBuff: int(service.GetSndBuff()), + IsClient: true, + IsAutoReconn: true, + AuthKey: service.GetAuthKey(), + SoLinger: int(service.GetSoLinger()), + KeepAlive: service.GetKeepAlive(), + NoDelay: service.GetNoDelay(), + IsInnerLink: service.GetIsInnerLink(), + SupportFragment: service.GetSupportFragment(), + AllowMultiConn: service.GetAllowMultiConn(), + EncoderName: service.GetEncoderName(), + DecoderName: service.GetDecoderName(), + FilterChain: service.GetFilterChain(), + HandlerChain: service.GetHandlerChain(), + Protocol: service.GetProtocol(), + Path: service.GetPath(), + } + if !sc.AllowMultiConn && netlib.ConnectorMgr.IsConnecting(sc) { + logger.Logger.Warnf("%v:%v %v:%v had connected, not allow multiple connects", sc.Id, sc.Name, 
sc.Ip, sc.Port)
+					return nil
+				}
+				sc.Init()
+				err := netlib.Connect(sc)
+				if err != nil {
+					logger.Logger.Warn("connect server failed err:", err)
+				}
+			}
+		}
+		return nil
+	}))
+
+	// service shutdown
+	netlib.RegisterFactory(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_SHUT), netlib.PacketFactoryWrapper(func() interface{} {
+		return &protocol.SSServiceShut{}
+	}))
+	netlib.RegisterHandler(int(protocol.SrvlibPacketID_PACKET_SS_SERVICE_SHUT), netlib.HandlerWrapper(func(s *netlib.Session, packetid int, pack interface{}) error {
+		if sr, ok := pack.(*protocol.SSServiceShut); ok {
+			service := sr.GetService()
+			if service != nil {
+				netlib.ShutConnector(service.GetIp(), int(service.GetPort()))
+			}
+		}
+		return nil
+	}))
+
+	ServerSessionMgrSington.AddListener(ServiceMgr)
+}
diff --git a/srvlib/sessionid.go b/srvlib/sessionid.go
new file mode 100644
index 0000000..a3bd4ce
--- /dev/null
+++ b/srvlib/sessionid.go
@@ -0,0 +1,60 @@
+package srvlib
+
+import (
+	"mongo.games.com/goserver/core/netlib"
+)
+
+const (
+	SessionIdSeqIdBits     uint32 = 32
+	SessionIdSrvIdBits            = 16
+	SessionIdSrvTypeBits          = 8
+	SessionIdSrvAreaIdBits        = 8
+	SessionIdSrvIdOffset          = SessionIdSeqIdBits
+	SessionIdSrvTypeOffset        = SessionIdSrvIdOffset + SessionIdSrvIdBits
+	SessionIdSrvAreaOffset        = SessionIdSrvTypeOffset + SessionIdSrvTypeBits
+	SessionIdSeqIdMask            = 1<<SessionIdSeqIdBits - 1
+	SessionIdSrvIdMask            = 1<<SessionIdSrvIdBits - 1
+	SessionIdSrvTypeMask          = 1<<SessionIdSrvTypeBits - 1
+	SessionIdSrvAreaIdMask        = 1<<SessionIdSrvAreaIdBits - 1
+)
+
+// NOTE(review): the span between "1<" and ">SessionIdSrvAreaOffset" was
+// garbled in extraction; the mask constants above, the SessionId type and
+// the AreaId accessor below are reconstructed from the surrounding
+// accessors (SrvType/SrvId/SeqId) and the bit-width constants. A
+// constructor using the imported netlib package may also have been lost
+// here — verify against the upstream blob (a3bd4ce).
+type SessionId uint64
+
+func (id SessionId) AreaId() uint32 {
+	return uint32(id>>SessionIdSrvAreaOffset) & SessionIdSrvAreaIdMask
+}
+
+func (id SessionId) SrvType() uint32 {
+	return uint32(id>>SessionIdSrvTypeOffset) & SessionIdSrvTypeMask
+}
+
+func (id SessionId) SrvId() uint32 {
+	return uint32(id>>SessionIdSrvIdOffset) & SessionIdSrvIdMask
+}
+
+func (id SessionId) SeqId() uint32 {
+	return uint32(id) & SessionIdSeqIdMask
+}
diff --git a/srvlib/srvertable.go b/srvlib/srvertable.go
new file mode 100644
index 0000000..435d95d
--- /dev/null
+++ b/srvlib/srvertable.go
@@ -0,0 +1,75 @@
+package srvlib
+
+var (
+	sessionServiceERtable = make(map[int32][]int32)
+	serviceSessionERtable = make(map[int32][]int32)
+)
+
+var 
arrER = [][]int32{ + {0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0}, + {2, 3, 6, 7, 8, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0}, + {3, 0, 0, 0, 0, 0, 0, 0, 0}, + {6, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0}, +} + +func init() { + buildSessionTable() + buildServiceTable() +} + +func buildSessionTable() { + for k1, v1 := range arrER { + t := make([]int32, 0, MaxServerType) + for _, v2 := range v1 { + if v2 != 0 { + t = append(t, int32(v2)) + } + } + sessionServiceERtable[int32(k1)] = t + } +} + +func buildServiceTable() { + for k1, v1 := range sessionServiceERtable { + for _, v2 := range v1 { + if _, has := serviceSessionERtable[v2]; !has { + serviceSessionERtable[v2] = make([]int32, 0, MaxServerType) + } + + serviceSessionERtable[v2] = append(serviceSessionERtable[v2], k1) + } + } +} + +func SessionCareService(sessionType, serviceType int32) bool { + if v, has := sessionServiceERtable[sessionType]; has { + for _, service := range v { + if service == serviceType { + return true + } + } + } + + return false +} + +func GetCareSessionsByService(serviceType int32) []int32 { + if v, has := serviceSessionERtable[serviceType]; has { + return v + } + + return nil +} + +func GetCareServicesBySession(sessionType int32) []int32 { + if v, has := sessionServiceERtable[sessionType]; has { + return v + } + + return nil +} diff --git a/srvlib/txcommskeleton.go b/srvlib/txcommskeleton.go new file mode 100644 index 0000000..db5be7a --- /dev/null +++ b/srvlib/txcommskeleton.go @@ -0,0 +1,73 @@ +package srvlib + +import ( + "mongo.games.com/goserver/core/builtin/action" + "mongo.games.com/goserver/core/builtin/protocol" + "mongo.games.com/goserver/core/logger" + "mongo.games.com/goserver/core/netlib" + "mongo.games.com/goserver/core/transact" +) + +type TxCommSkeleton struct { +} + +func (tcs *TxCommSkeleton) SendTransResult(parent, me *transact.TransNodeParam, tr *transact.TransResult) bool { + 
//logger.Logger.Trace("TxCommSkeleton.SendTransResult") + p := action.ContructTxResultPacket(parent, me, tr) + if p == nil { + return false + } + s := ServerSessionMgrSington.GetSession(parent.AreaID, int(parent.Ot), parent.Oid) + if s == nil { + logger.Logger.Trace("TxCommSkeleton.SendTransResult s=nil") + return false + } + + s.Send(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_RESULT), p) + //logger.Logger.Trace("TxCommSkeleton.SendTransResult success") + return true +} + +func (tcs *TxCommSkeleton) SendTransStart(parent, me *transact.TransNodeParam, ud interface{}) bool { + //logger.Logger.Trace("TxCommSkeleton.SendTransStart") + p := action.ContructTxStartPacket(parent, me, ud) + if p == nil { + return false + } + s := ServerSessionMgrSington.GetSession(me.AreaID, int(me.Ot), me.Oid) + if s == nil { + logger.Logger.Trace("TxCommSkeleton.SendTransStart s=nil") + return false + } + + s.Send(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_START), p) + return true +} + +func (tcs *TxCommSkeleton) SendCmdToTransNode(tnp *transact.TransNodeParam, c transact.TransCmd) bool { + //logger.Logger.Trace("TxCommSkeleton.SendCmdToTransNode") + p := action.ConstructTxCmdPacket(tnp, c) + if p == nil { + return false + } + s := ServerSessionMgrSington.GetSession(tnp.AreaID, int(tnp.Ot), tnp.Oid) + if s == nil { + logger.Logger.Trace("TxCommSkeleton.SendCmdToTransNode s=nil") + return false + } + + s.Send(int(protocol.CoreBuiltinPacketID_PACKET_SS_TX_CMD), p) + return true +} + +func (tcs *TxCommSkeleton) GetSkeletonID() int { + return netlib.Config.SrvInfo.Id +} + +func (tcs *TxCommSkeleton) GetAreaID() int { + return netlib.Config.SrvInfo.AreaID +} + +func init() { + transact.RegisteTxCommSkeleton("mongo.games.com/goserver/srvlib/txcommskeleton", &TxCommSkeleton{}) +}