@@ -4,13 +4,10 @@ import {RuleHelper} from "textlint-rule-helper";
import { getTokenizer } from "kuromojin";
import splitSentences, { Syntax as SentenceSyntax } from "sentence-splitter";
import StringSource from "textlint-util-to-string";
- // is the token a 助詞 (particle) or not?
- const is助詞Token = token => {
-     return token.pos === "助詞";
- };
- const is読点Token = token => {
-     return token.surface_form === "、" && token.pos === "名詞";
- };
+ import {
+     is助詞Token, is読点Token,
+     createKeyFromKey, restoreToSurfaceFromKey
+ } from "./token-utils";
/**
 * Create token map object
 * {
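The helpers removed above were extracted into `./token-utils`, which is not shown in this diff. A minimal sketch of what that module plausibly exports, inferred from the removed code and the `"は:助詞.係助詞"` key format used below (the exact token fields used to build the key, such as `pos_detail_1`, are assumptions):

```js
// token-utils.js — a sketch inferred from this diff, not the actual module.

// Mirror the helpers removed above (the "、" check keeps the original
// pos === "名詞" condition as-is, carried over from the removed code).
export const is助詞Token = token => token.pos === "助詞";
export const is読点Token = token =>
    token.surface_form === "、" && token.pos === "名詞";

// Assumed key format: surface form plus pos details, e.g. "は:助詞.係助詞".
export const createKeyFromKey = token =>
    `${token.surface_form}:${token.pos}.${token.pos_detail_1}`;

// Recover the surface form ("は") from a key ("は:助詞.係助詞").
export const restoreToSurfaceFromKey = key => key.split(":")[0];
```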
@@ -23,11 +20,12 @@ const is読点Token = token => {
function createSurfaceKeyMap(tokens) {
    // target only 助詞 (particle) tokens
    return tokens.filter(is助詞Token).reduce((keyMap, token) => {
-         // "は" : [token]
-         if (!keyMap[token.surface_form]) {
-             keyMap[token.surface_form] = [];
+         // "は:助詞.係助詞" : [token]
+         const tokenKey = createKeyFromKey(token);
+         if (!keyMap[tokenKey]) {
+             keyMap[tokenKey] = [];
        }
-         keyMap[token.surface_form].push(token);
+         keyMap[tokenKey].push(token);
        return keyMap;
    }, {});
}
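A hypothetical run showing what the new keying changes (token objects are simplified and the key format is assumed as sketched above; real kuromoji tokens carry more fields):

```js
const tokens = [
    { surface_form: "は", pos: "助詞", pos_detail_1: "係助詞" },
    { surface_form: "で", pos: "助詞", pos_detail_1: "格助詞" },
    { surface_form: "は", pos: "助詞", pos_detail_1: "係助詞" }
];
createSurfaceKeyMap(tokens);
// Before this commit, keys were bare surfaces: { "は": [...], "で": [...] }.
// After it, the pos information is part of the key:
// => {
//      "は:助詞.係助詞": [tokens[0], tokens[2]],
//      "で:助詞.格助詞": [tokens[1]]
//    }
```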
@@ -100,12 +98,13 @@ export default function (context, options = {}) {

        joshiTokens = [tokenA, tokenB, tokenC, tokenD, tokenE, tokenF]
        joshiTokenSurfaceKeyMap = {
-             "は": [tokenA, tokenC, tokenE],
-             "で": [tokenB, tokenD, tokenF]
+             "は:助詞.係助詞": [tokenA, tokenC, tokenE],
+             "で:助詞.係助詞": [tokenB, tokenD, tokenF]
        }
        */
        Object.keys(joshiTokenSurfaceKeyMap).forEach(key => {
-             let tokens = joshiTokenSurfaceKeyMap[key];
+             const tokens = joshiTokenSurfaceKeyMap[key];
+             const joshiName = restoreToSurfaceFromKey(key);
            // when not in strict mode, remove the exceptions
            if (!isStrict) {
                if (matchExceptionRule(tokens)) {
@@ -117,27 +116,28 @@ export default function (context, options = {}) {
            }
            // report when the found differenceIndex is within minInterval;
            // tokens are sorted in ascending order
-             tokens.reduce((prev, current) => {
-                 let startPosition = countableTokens.indexOf(prev);
-                 let otherPosition = countableTokens.indexOf(current);
-                 // if difference
-                 let differenceIndex = otherPosition - startPosition;
+             const reducer = (prev, current) => {
+                 const startPosition = countableTokens.indexOf(prev);
+                 const otherPosition = countableTokens.indexOf(current);
+                 // report an error when the distance between 助詞 tokens is at or below the configured value
+                 const differenceIndex = otherPosition - startPosition;
                if (differenceIndex <= minInterval) {
-                     let originalPosition = source.originalPositionFor({
+                     const originalPosition = source.originalPositionFor({
                        line: sentence.loc.start.line,
                        column: sentence.loc.start.column + (current.word_position - 1)
                    });
-                     // padding position
-                     var padding = {
+                     // compute the padding position
+                     const padding = {
                        line: originalPosition.line - 1,
                        // matchLastToken.word_position starts with 1
                        // but this padding column starts with 0 (== -1)
                        column: originalPosition.column
                    };
-                     report(node, new RuleError(`一文に二回以上利用されている助詞 "${key}" がみつかりました。`, padding));
+                     report(node, new RuleError(`一文に二回以上利用されている助詞 "${joshiName}" がみつかりました。`, padding));
                }
                return current;
-             });
+             };
+             tokens.reduce(reducer);
        });
    };
    sentences.forEach(checkSentence);
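Extracting the callback into a named `reducer` does not change the underlying trick: `Array#reduce` without an initial value visits the array pairwise, each call receiving the previous element as `prev` and the next as `current`. A self-contained sketch of that pairwise scan (the positions and `minInterval` value are made up for illustration):

```js
// Hypothetical token positions within countableTokens.
const positions = [1, 2, 9];
const minInterval = 1;
positions.reduce((prev, current) => {
    // adjacent-pair check, same shape as the reducer above
    if (current - prev <= minInterval) {
        console.log(`too close: ${prev} -> ${current}`);
    }
    return current;
});
// logs: "too close: 1 -> 2"
```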